"""
Performance Testing for Tikker Services

Measures response times, throughput, and resource usage.
Identifies bottlenecks and optimization opportunities.
"""
|
|
|
|
import json
import statistics
import time
from concurrent.futures import ThreadPoolExecutor
from typing import Dict, List, Tuple

import pytest
|
|
|
|
|
|
class PerformanceMetrics:
    """Collect and summarize named performance measurements (latencies in ms, etc.)."""

    def __init__(self):
        # Maps a measurement name to the list of values recorded under it.
        self.measurements: Dict[str, List[float]] = {}

    def record(self, name: str, value: float) -> None:
        """Record a single measurement under *name*."""
        # setdefault avoids the double dict lookup of "if name not in ...".
        self.measurements.setdefault(name, []).append(value)

    def summary(self, name: str) -> Dict[str, float]:
        """Return summary statistics for *name*, or {} if nothing was recorded.

        Keys: count, min, max, avg, median, stdev. stdev is 0.0 when fewer
        than two samples exist, since statistics.stdev requires at least two.
        """
        values = self.measurements.get(name)
        # Guards both the missing key and a (theoretically) empty value list,
        # which would otherwise crash min()/max()/mean().
        if not values:
            return {}

        return {
            "count": len(values),
            "min": min(values),
            "max": max(values),
            "avg": statistics.mean(values),
            "median": statistics.median(values),
            # 0.0 (not int 0) keeps the value types consistent across keys.
            "stdev": statistics.stdev(values) if len(values) > 1 else 0.0,
        }
|
|
|
|
|
|
@pytest.fixture
def metrics():
    """Yield a fresh PerformanceMetrics collector for each test."""
    collector = PerformanceMetrics()
    return collector
|
|
|
|
|
|
class TestAPIPerformance:
    """Tests for API performance characteristics."""

    def test_health_check_latency(self, api_client, metrics):
        """Measure health check endpoint latency over 10 sequential calls."""
        if not api_client:
            pytest.skip("API client not available")

        for _ in range(10):
            # perf_counter is monotonic and high-resolution; time.time can
            # jump (NTP adjustments) and has coarse resolution on some platforms.
            start = time.perf_counter()
            response = api_client.get("/health")
            elapsed = (time.perf_counter() - start) * 1000  # ms

            assert response.status_code == 200
            metrics.record("health_check_latency", elapsed)

        summary = metrics.summary("health_check_latency")
        assert summary["avg"] < 100, "Health check should be < 100ms"
        assert summary["max"] < 500, "Health check max should be < 500ms"

    def test_daily_stats_latency(self, api_client, metrics):
        """Measure daily stats endpoint latency over 5 sequential calls."""
        if not api_client:
            pytest.skip("API client not available")

        for _ in range(5):
            start = time.perf_counter()
            response = api_client.get("/api/stats/daily")
            elapsed = (time.perf_counter() - start) * 1000  # ms

            # Only successful calls count toward the latency budget.
            if response.status_code == 200:
                metrics.record("daily_stats_latency", elapsed)

        if "daily_stats_latency" in metrics.measurements:
            summary = metrics.summary("daily_stats_latency")
            assert summary["avg"] < 200, "Daily stats should be < 200ms"

    def test_top_words_latency(self, api_client, metrics):
        """Measure top words endpoint latency across several page sizes."""
        if not api_client:
            pytest.skip("API client not available")

        limits = [10, 50, 100]
        for limit in limits:
            for _ in range(3):
                start = time.perf_counter()
                response = api_client.get(f"/api/words/top?limit={limit}")
                elapsed = (time.perf_counter() - start) * 1000  # ms

                if response.status_code == 200:
                    metrics.record(f"top_words_latency_{limit}", elapsed)

        for limit in limits:
            key = f"top_words_latency_{limit}"
            if key in metrics.measurements:
                summary = metrics.summary(key)
                assert summary["avg"] < 500, f"Top words (limit={limit}) should be < 500ms"

    def test_concurrent_requests(self, api_client, metrics):
        """Test API under genuinely concurrent load.

        The previous implementation issued the requests one after another,
        so it never actually exercised concurrency; a thread pool now fires
        all endpoints in parallel.
        """
        if not api_client:
            pytest.skip("API client not available")

        endpoints = [
            "/health",
            "/api/stats/daily",
            "/api/words/top?limit=10",
        ]

        def timed_get(endpoint):
            # Time each request individually so slow endpoints are visible.
            start = time.perf_counter()
            response = api_client.get(endpoint)
            return response, (time.perf_counter() - start) * 1000  # ms

        with ThreadPoolExecutor(max_workers=len(endpoints)) as pool:
            results = list(pool.map(timed_get, endpoints))

        times = []
        for response, elapsed in results:
            times.append(elapsed)
            if response.status_code == 200:
                metrics.record("concurrent_request_latency", elapsed)

        avg_time = statistics.mean(times)
        assert avg_time < 300, "Average concurrent request latency should be < 300ms"
|
|
|
|
|
|
class TestAIPerformance:
    """Tests for AI service performance."""

    def test_health_check_latency(self, ai_client, metrics):
        """Measure AI health check latency over 5 sequential calls."""
        if not ai_client:
            pytest.skip("AI client not available")

        for _ in range(5):
            # perf_counter is the correct clock for durations: monotonic
            # and high-resolution, unlike time.time.
            start = time.perf_counter()
            response = ai_client.get("/health")
            elapsed = (time.perf_counter() - start) * 1000  # ms

            assert response.status_code == 200
            metrics.record("ai_health_latency", elapsed)

        summary = metrics.summary("ai_health_latency")
        assert summary["avg"] < 100, "AI health check should be < 100ms"

    def test_analysis_latency(self, ai_client, metrics):
        """Measure text analysis latency (informational: printed, not asserted)."""
        if not ai_client:
            pytest.skip("AI client not available")

        payload = {
            "text": "This is a test message for analysis of keystroke patterns",
            "analysis_type": "general"
        }

        for _ in range(3):
            start = time.perf_counter()
            response = ai_client.post("/analyze", json=payload)
            elapsed = (time.perf_counter() - start) * 1000  # ms

            if response.status_code == 200:
                metrics.record("ai_analysis_latency", elapsed)

        # Model inference time varies too much for a hard threshold;
        # report the distribution instead of asserting on it.
        if "ai_analysis_latency" in metrics.measurements:
            summary = metrics.summary("ai_analysis_latency")
            print(f"\nAI Analysis latency: {summary}")
|
|
|
|
|
|
class TestVizPerformance:
    """Tests for visualization service performance."""

    def test_health_check_latency(self, viz_client, metrics):
        """Measure visualization health check latency over 5 sequential calls."""
        if not viz_client:
            pytest.skip("Visualization client not available")

        for _ in range(5):
            # perf_counter is the correct clock for durations: monotonic
            # and high-resolution, unlike time.time.
            start = time.perf_counter()
            response = viz_client.get("/health")
            elapsed = (time.perf_counter() - start) * 1000  # ms

            assert response.status_code == 200
            metrics.record("viz_health_latency", elapsed)

        summary = metrics.summary("viz_health_latency")
        assert summary["avg"] < 100, "Viz health check should be < 100ms"

    def test_chart_generation_latency(self, viz_client, metrics):
        """Measure chart generation latency for each supported chart type."""
        if not viz_client:
            pytest.skip("Visualization client not available")

        chart_types = ["bar", "line", "pie"]
        for chart_type in chart_types:
            payload = {
                "title": f"Test {chart_type} Chart",
                "data": {f"Item{i}": i*100 for i in range(10)},
                "chart_type": chart_type
            }

            for _ in range(2):
                start = time.perf_counter()
                response = viz_client.post("/chart", json=payload)
                elapsed = (time.perf_counter() - start) * 1000  # ms

                if response.status_code == 200:
                    metrics.record(f"chart_{chart_type}_latency", elapsed)

        for chart_type in chart_types:
            key = f"chart_{chart_type}_latency"
            if key in metrics.measurements:
                summary = metrics.summary(key)
                assert summary["avg"] < 1000, f"{chart_type} chart should be < 1000ms"
|
|
|
|
|
|
class TestThroughput:
    """Tests for service throughput."""

    def test_sequential_requests(self, api_client):
        """Test sequential request throughput against /health for ~5 seconds."""
        if not api_client:
            pytest.skip("API client not available")

        duration = 5  # seconds of load
        # perf_counter is monotonic — time.time can jump during NTP sync,
        # which would corrupt the throughput denominator.
        start = time.perf_counter()
        count = 0

        while time.perf_counter() - start < duration:
            response = api_client.get("/health")
            if response.status_code == 200:
                count += 1

        elapsed = time.perf_counter() - start
        throughput = count / elapsed

        print(f"\nSequential throughput: {throughput:.2f} req/s")
        assert throughput > 10, "Throughput should be > 10 req/s"

    def test_word_search_throughput(self, api_client):
        """Test word search throughput (informational: printed, not asserted)."""
        if not api_client:
            pytest.skip("API client not available")

        words = ["the", "and", "test", "python", "data"]
        duration = 5  # seconds of load
        start = time.perf_counter()
        count = 0

        while time.perf_counter() - start < duration:
            for word in words:
                # Re-check inside the inner loop so a partial pass over the
                # word list cannot overshoot the measurement window.
                if time.perf_counter() - start >= duration:
                    break
                response = api_client.get(f"/api/words/find?word={word}")
                # 404 (word not recorded) still counts as a served request.
                if response.status_code in [200, 404]:
                    count += 1

        elapsed = time.perf_counter() - start
        throughput = count / elapsed

        print(f"\nWord search throughput: {throughput:.2f} req/s")
|
|
|
|
|
|
class TestMemoryUsage:
    """Tests for memory consumption patterns."""

    def test_large_data_response(self, api_client):
        """Test API with large data response."""
        if not api_client:
            pytest.skip("API client not available")

        response = api_client.get("/api/words/top?limit=100")
        if response.status_code != 200:
            return

        payload = response.json()
        serialized = json.dumps(payload)
        size_mb = len(serialized) / (1024 * 1024)
        print(f"\nResponse size: {size_mb:.2f} MB")
        assert size_mb < 10, "Response should be < 10 MB"

    def test_repeated_requests(self, api_client):
        """Test for memory leaks with repeated requests."""
        if not api_client:
            pytest.skip("API client not available")

        for _request_number in range(100):
            assert api_client.get("/health").status_code == 200
|
|
|
|
|
|
class TestResponseQuality:
    """Tests for response quality metrics."""

    def test_daily_stats_response_structure(self, api_client):
        """Verify the daily stats payload exposes all required counters."""
        if not api_client:
            pytest.skip("API client not available")

        response = api_client.get("/api/stats/daily")
        if response.status_code != 200:
            return

        payload = response.json()
        for field in ("presses", "releases", "repeats", "total"):
            assert field in payload, f"Missing field: {field}"

    def test_top_words_response_structure(self, api_client):
        """Verify the top-words payload is a list of well-formed word records."""
        if not api_client:
            pytest.skip("API client not available")

        response = api_client.get("/api/words/top?limit=5")
        if response.status_code != 200:
            return

        payload = response.json()
        assert isinstance(payload, list), "Response should be a list"
        if payload:
            first = payload[0]
            for field in ("rank", "word", "count", "percentage"):
                assert field in first, f"Missing field in word: {field}"
|
|
|
|
|
|
class TestErrorRecovery:
    """Tests for error handling and recovery."""

    def test_invalid_parameter_handling(self, api_client, metrics):
        """Test that an invalid (empty) parameter is handled quickly."""
        if not api_client:
            pytest.skip("API client not available")

        # perf_counter is the correct clock for durations: monotonic
        # and high-resolution, unlike time.time.
        start = time.perf_counter()
        response = api_client.get("/api/words/find?word=")
        elapsed = (time.perf_counter() - start) * 1000  # ms

        metrics.record("invalid_param_latency", elapsed)
        # Either an empty result (200) or a validation error (400) is acceptable.
        assert response.status_code in [200, 400]
        assert elapsed < 100, "Error response should be quick"

    def test_missing_required_parameter(self, api_client):
        """Test missing required parameter."""
        if not api_client:
            pytest.skip("API client not available")

        response = api_client.get("/api/stats/hourly")
        # 400/422 if the parameter is required; 200 if the API defaults it.
        assert response.status_code in [400, 422, 200]
|