241 lines
8.3 KiB
Python
Raw Normal View History

2025-11-29 00:50:53 +01:00
#!/usr/bin/env python3
"""
Tikker Performance Benchmark Script
Measures and reports performance metrics for all services.
Generates benchmark reports with detailed statistics.
"""
import time
import json
import statistics
import sys
from typing import Dict, List, Tuple
from pathlib import Path
from datetime import datetime
import requests
from requests.exceptions import RequestException
class BenchmarkRunner:
    """Run latency and throughput benchmarks against the Tikker services.

    Latencies are recorded in milliseconds, throughput in requests/second.
    Results accumulate in ``self.results`` keyed by metric name.
    """

    def __init__(self, base_url: str = "http://localhost", verbose: bool = False):
        """
        Args:
            base_url: Scheme + host; the per-service port is appended on each request.
            verbose: When True, print every request's latency and status code.
        """
        self.base_url = base_url
        self.verbose = verbose
        # Metric name -> list of measurements (ms for latency, req/s for throughput).
        self.results: Dict[str, List[float]] = {}

    def _request(self, method: str, service_port: int, endpoint: str,
                 json_data: Dict = None, timeout: int = 30) -> Tuple[int, float]:
        """Make an HTTP request and measure its latency.

        Args:
            method: "GET" or anything else (treated as POST with ``json_data``).
            service_port: Port appended to ``base_url``.
            endpoint: Path (may include a query string).
            json_data: JSON body for POST requests.
            timeout: Per-request timeout in seconds.

        Returns:
            (status_code, elapsed_ms); status_code is 0 when the request
            failed at the transport level (connection refused, timeout, ...).
        """
        url = f"{self.base_url}:{service_port}{endpoint}"
        # perf_counter is monotonic and high-resolution, so measurements
        # cannot be skewed by wall-clock adjustments (unlike time.time()).
        start = time.perf_counter()
        try:
            if method.upper() == "GET":
                response = requests.get(url, timeout=timeout)
            else:
                response = requests.post(url, json=json_data, timeout=timeout)
            elapsed = (time.perf_counter() - start) * 1000
            if self.verbose:
                print(f" {method} {endpoint}: {elapsed:.2f}ms -> {response.status_code}")
            return response.status_code, elapsed
        except RequestException as e:
            # Still report how long the failed attempt took.
            elapsed = (time.perf_counter() - start) * 1000
            if self.verbose:
                print(f" {method} {endpoint}: {elapsed:.2f}ms -> ERROR: {e}")
            return 0, elapsed

    def record(self, name: str, latency: float):
        """Append a measurement under the given metric name."""
        self.results.setdefault(name, []).append(latency)

    def benchmark_api(self, iterations: int = 10):
        """Benchmark the main API endpoints (port 8000), ``iterations`` passes each."""
        print("\n=== API Service Benchmark ===")
        # (method, port, endpoint, json payload, metric suffix)
        endpoints = [
            ("GET", 8000, "/health", None, "health"),
            ("GET", 8000, "/", None, "root"),
            ("GET", 8000, "/api/stats/daily", None, "daily_stats"),
            ("GET", 8000, "/api/words/top?limit=10", None, "top_words"),
        ]
        for i in range(iterations):
            # max(1, ...) guards against ZeroDivisionError for iterations < 4,
            # matching the progress logic used by the other benchmark methods.
            if i > 0 and i % max(1, iterations // 4) == 0:
                print(f" Progress: {i}/{iterations}")
            for method, port, endpoint, payload, name in endpoints:
                # BUG FIX: the original passed an undefined `json_data` name
                # here (NameError); pass the per-endpoint payload instead.
                status, latency = self._request(method, port, endpoint, payload)
                # 503 is accepted: the service is reachable but degraded.
                if status in (200, 503):
                    self.record(f"api_{name}", latency)

    def benchmark_ai(self, iterations: int = 5):
        """Benchmark the AI service (port 8001): health check + text analysis."""
        print("\n=== AI Service Benchmark ===")
        payload = {
            "text": "This is a test message for keystroke pattern analysis",
            "analysis_type": "general"
        }
        for i in range(iterations):
            if i > 0 and i % max(1, iterations // 2) == 0:
                print(f" Progress: {i}/{iterations}")
            status, latency = self._request("GET", 8001, "/health", None)
            if status in (200, 503):
                self.record("ai_health", latency)
            status, latency = self._request("POST", 8001, "/analyze", payload)
            if status in (200, 503):
                self.record("ai_analyze", latency)

    def benchmark_viz(self, iterations: int = 5):
        """Benchmark the visualization service (port 8002): health + chart rendering."""
        print("\n=== Visualization Service Benchmark ===")
        chart_types = ["bar", "line", "pie"]
        for i in range(iterations):
            if i > 0 and i % max(1, iterations // 2) == 0:
                print(f" Progress: {i}/{iterations}")
            status, latency = self._request("GET", 8002, "/health", None)
            if status in (200, 503):
                self.record("viz_health", latency)
            for chart_type in chart_types:
                payload = {
                    "title": f"Benchmark {chart_type}",
                    "data": {f"Item{j}": j*100 for j in range(5)},
                    "chart_type": chart_type
                }
                status, latency = self._request("POST", 8002, "/chart", payload)
                if status in (200, 503):
                    self.record(f"viz_chart_{chart_type}", latency)

    def benchmark_throughput(self, duration: int = 10):
        """Hammer each service's /health endpoint for ``duration`` seconds.

        Records sequential (single-connection) throughput in req/s per service.
        """
        print(f"\n=== Throughput Benchmark ({duration}s) ===")
        endpoints = [
            (8000, "/health", "api"),
            (8001, "/health", "ai"),
            (8002, "/health", "viz"),
        ]
        for port, endpoint, service in endpoints:
            count = 0
            start = time.perf_counter()
            while time.perf_counter() - start < duration:
                status, _ = self._request("GET", port, endpoint, None)
                if status in (200, 503):
                    count += 1
            # Use the actual elapsed time (>= duration) for an honest rate.
            elapsed = time.perf_counter() - start
            throughput = count / elapsed
            print(f" {service.upper():3s} Service: {throughput:6.2f} req/s")
            self.record(f"throughput_{service}", throughput)

    def get_statistics(self, name: str) -> Dict:
        """Return count/min/max/mean/median/stdev for a metric, or {} if unrecorded."""
        values = self.results.get(name)
        if not values:
            return {}
        return {
            "count": len(values),
            "min": min(values),
            "max": max(values),
            "mean": statistics.mean(values),
            "median": statistics.median(values),
            # stdev requires at least two samples; report 0 for a single one.
            "stdev": statistics.stdev(values) if len(values) > 1 else 0,
        }

    def print_summary(self):
        """Print a formatted summary table of all recorded metrics."""
        print("\n" + "=" * 70)
        print("BENCHMARK SUMMARY")
        print("=" * 70)
        categories = {
            "API Service": ["api_health", "api_root", "api_daily_stats", "api_top_words"],
            "AI Service": ["ai_health", "ai_analyze"],
            "Visualization": ["viz_health", "viz_chart_bar", "viz_chart_line", "viz_chart_pie"],
            "Throughput": ["throughput_api", "throughput_ai", "throughput_viz"],
        }
        for category, metrics in categories.items():
            print(f"\n{category}:")
            print("-" * 70)
            for metric in metrics:
                stats = self.get_statistics(metric)
                # Skip metrics that were never recorded (e.g. service was down).
                if stats:
                    if "throughput" in metric:
                        print(f" {metric:25s}: {stats['mean']:8.2f} req/s")
                    else:
                        print(f" {metric:25s}: {stats['mean']:8.2f}ms "
                              f"(min: {stats['min']:6.2f}ms, "
                              f"max: {stats['max']:6.2f}ms)")
        print("\n" + "=" * 70)

    def generate_report(self, output_file: str = "benchmark_report.json"):
        """Write per-metric statistics to ``output_file`` as indented JSON."""
        report = {
            "timestamp": datetime.now().isoformat(),
            "results": {name: self.get_statistics(name) for name in self.results}
        }
        with open(output_file, "w") as f:
            json.dump(report, f, indent=2)
        print(f"\nDetailed report saved to: {output_file}")
def main():
    """Entry point: run every benchmark suite and emit summary + JSON report.

    An optional first CLI argument overrides the target base URL
    (default: http://localhost). Exits with status 1 on interrupt or error.
    """
    print("Tikker Performance Benchmark")
    print("=" * 70)
    base_url = sys.argv[1] if len(sys.argv) > 1 else "http://localhost"
    runner = BenchmarkRunner(base_url=base_url, verbose=True)
    try:
        runner.benchmark_api(iterations=10)
        runner.benchmark_ai(iterations=5)
        runner.benchmark_viz(iterations=5)
        runner.benchmark_throughput(duration=10)
        runner.print_summary()
        runner.generate_report()
        print("\nBenchmark completed successfully!")
    except KeyboardInterrupt:
        # Ctrl-C mid-run: report and signal failure to the shell.
        print("\n\nBenchmark interrupted by user")
        sys.exit(1)
    except Exception as e:
        print(f"\nBenchmark error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()