#!/usr/bin/env python3
"""
WebDAV Server Concurrent Benchmark Tool
Heavy load testing with performance metrics per method
"""
import asyncio
import aiohttp
import time
import argparse
import statistics
from dataclasses import dataclass, field
from typing import List, Dict, Optional
from collections import defaultdict
import random
import string
@dataclass
class RequestMetrics:
"""Metrics for a single request"""
method: str
duration: float
status: int
success: bool
error: Optional[str] = None
filename: Optional[str] = None # To track created/moved resources
@dataclass
class MethodStats:
"""Statistics for a specific HTTP method"""
method: str
total_requests: int = 0
successful_requests: int = 0
failed_requests: int = 0
total_duration: float = 0.0
durations: List[float] = field(default_factory=list)
errors: Dict[str, int] = field(default_factory=lambda: defaultdict(int))
@property
def success_rate(self) -> float:
return (self.successful_requests / self.total_requests * 100) if self.total_requests > 0 else 0
@property
def avg_duration(self) -> float:
return self.total_duration / self.total_requests if self.total_requests > 0 else 0
@property
def requests_per_second(self) -> float:
        # Throughput over cumulative request time rather than wall-clock time.
        # The final report computes per-method RPS against total benchmark
        # duration instead; this property is kept for ad-hoc use.
        return self.total_requests / self.total_duration if self.total_duration > 0 else 0
@property
def min_duration(self) -> float:
return min(self.durations) if self.durations else 0
@property
def max_duration(self) -> float:
return max(self.durations) if self.durations else 0
@property
def p50_duration(self) -> float:
return statistics.median(self.durations) if self.durations else 0
@property
def p95_duration(self) -> float:
if not self.durations:
return 0
sorted_durations = sorted(self.durations)
index = int(len(sorted_durations) * 0.95)
return sorted_durations[index] if index < len(sorted_durations) else sorted_durations[-1]
@property
def p99_duration(self) -> float:
if not self.durations:
return 0
sorted_durations = sorted(self.durations)
index = int(len(sorted_durations) * 0.99)
return sorted_durations[index] if index < len(sorted_durations) else sorted_durations[-1]
class WebDAVBenchmark:
"""WebDAV server benchmark runner"""
def __init__(self, url: str, username: str, password: str,
concurrency: int = 50, duration: int = 60):
self.url = url.rstrip('/')
self.username = username
self.password = password
self.concurrency = concurrency
self.duration = duration
self.stats: Dict[str, MethodStats] = defaultdict(lambda: MethodStats(method=""))
self.start_time = 0.0
self.stop_flag = False
self.auth = aiohttp.BasicAuth(username, password)
def random_string(self, length: int = 10) -> str:
"""Generate random string"""
return ''.join(random.choices(string.ascii_letters + string.digits, k=length))
async def record_metric(self, metric: RequestMetrics):
"""Record a request metric"""
stats = self.stats[metric.method]
if not stats.method:
stats.method = metric.method
stats.total_requests += 1
stats.total_duration += metric.duration
stats.durations.append(metric.duration)
if metric.success:
stats.successful_requests += 1
else:
stats.failed_requests += 1
error_key = f"Status {metric.status}" if metric.status != 0 else str(metric.error)
stats.errors[error_key] += 1
async def benchmark_options(self, session: aiohttp.ClientSession) -> RequestMetrics:
"""Benchmark OPTIONS request"""
start = time.time()
try:
async with session.options(self.url, auth=self.auth) as resp:
duration = time.time() - start
return RequestMetrics(
method='OPTIONS', duration=duration, status=resp.status,
success=resp.status == 200
)
except Exception as e:
return RequestMetrics(
method='OPTIONS', duration=time.time() - start, status=0,
success=False, error=str(e)
)
async def benchmark_propfind(self, session: aiohttp.ClientSession, depth: int = 0) -> RequestMetrics:
"""Benchmark PROPFIND request"""
        propfind_body = '''<?xml version="1.0" encoding="utf-8"?>
<D:propfind xmlns:D="DAV:">
  <D:allprop/>
</D:propfind>'''
start = time.time()
try:
async with session.request(
'PROPFIND', self.url, auth=self.auth, data=propfind_body,
headers={'Depth': str(depth), 'Content-Type': 'application/xml'}
) as resp:
await resp.read()
duration = time.time() - start
return RequestMetrics(
method='PROPFIND', duration=duration, status=resp.status,
success=resp.status == 207
)
except Exception as e:
return RequestMetrics(
method='PROPFIND', duration=time.time() - start, status=0,
success=False, error=str(e)
)
async def benchmark_put(self, session: aiohttp.ClientSession) -> RequestMetrics:
"""Benchmark PUT request"""
filename = f"bench_{self.random_string()}.txt"
content = self.random_string(1024).encode()
start = time.time()
try:
async with session.put(f"{self.url}/{filename}", auth=self.auth, data=content) as resp:
duration = time.time() - start
is_success = resp.status in [201, 204]
return RequestMetrics(
method='PUT', duration=duration, status=resp.status,
success=is_success,
filename=filename if is_success else None
)
except Exception as e:
return RequestMetrics(
method='PUT', duration=time.time() - start, status=0,
success=False, error=str(e)
)
async def benchmark_get(self, session: aiohttp.ClientSession, filename: str) -> RequestMetrics:
"""Benchmark GET request"""
start = time.time()
try:
async with session.get(f"{self.url}/{filename}", auth=self.auth) as resp:
await resp.read()
duration = time.time() - start
return RequestMetrics(
method='GET', duration=duration, status=resp.status,
success=resp.status == 200
)
except Exception as e:
return RequestMetrics(
method='GET', duration=time.time() - start, status=0,
success=False, error=str(e)
)
async def benchmark_head(self, session: aiohttp.ClientSession, filename: str) -> RequestMetrics:
"""Benchmark HEAD request"""
start = time.time()
try:
async with session.head(f"{self.url}/{filename}", auth=self.auth) as resp:
duration = time.time() - start
return RequestMetrics(
method='HEAD', duration=duration, status=resp.status,
success=resp.status == 200
)
except Exception as e:
return RequestMetrics(
method='HEAD', duration=time.time() - start, status=0,
success=False, error=str(e)
)
async def benchmark_mkcol(self, session: aiohttp.ClientSession) -> RequestMetrics:
"""Benchmark MKCOL request"""
dirname = f"bench_dir_{self.random_string()}"
start = time.time()
try:
async with session.request('MKCOL', f"{self.url}/{dirname}/", auth=self.auth) as resp:
duration = time.time() - start
is_success = resp.status == 201
return RequestMetrics(
method='MKCOL', duration=duration, status=resp.status,
success=is_success,
filename=dirname if is_success else None
)
except Exception as e:
return RequestMetrics(
method='MKCOL', duration=time.time() - start, status=0,
success=False, error=str(e)
)
async def benchmark_proppatch(self, session: aiohttp.ClientSession, filename: str) -> RequestMetrics:
"""Benchmark PROPPATCH request"""
        proppatch_body = '''<?xml version="1.0" encoding="utf-8"?>
<D:propertyupdate xmlns:D="DAV:" xmlns:Z="urn:example:benchmark">
  <D:set>
    <D:prop>
      <Z:author>Benchmark Test</Z:author>
    </D:prop>
  </D:set>
</D:propertyupdate>'''
start = time.time()
try:
async with session.request(
'PROPPATCH', f"{self.url}/{filename}", auth=self.auth, data=proppatch_body,
headers={'Content-Type': 'application/xml'}
) as resp:
await resp.read()
duration = time.time() - start
return RequestMetrics(
method='PROPPATCH', duration=duration, status=resp.status,
success=resp.status == 207
)
except Exception as e:
return RequestMetrics(
method='PROPPATCH', duration=time.time() - start, status=0,
success=False, error=str(e)
)
async def benchmark_copy(self, session: aiohttp.ClientSession, filename: str) -> RequestMetrics:
"""Benchmark COPY request"""
dest_filename = f"copy_{self.random_string()}.txt"
start = time.time()
try:
async with session.request(
'COPY', f"{self.url}/{filename}", auth=self.auth,
headers={'Destination': f"{self.url}/{dest_filename}"}
) as resp:
duration = time.time() - start
is_success = resp.status in [201, 204]
return RequestMetrics(
method='COPY', duration=duration, status=resp.status,
success=is_success,
filename=dest_filename if is_success else None
)
except Exception as e:
return RequestMetrics(
method='COPY', duration=time.time() - start, status=0,
success=False, error=str(e)
)
async def benchmark_move(self, session: aiohttp.ClientSession, filename: str) -> RequestMetrics:
"""Benchmark MOVE request"""
dest_filename = f"moved_{self.random_string()}.txt"
start = time.time()
try:
async with session.request(
'MOVE', f"{self.url}/{filename}", auth=self.auth,
headers={'Destination': f"{self.url}/{dest_filename}"}
) as resp:
duration = time.time() - start
is_success = resp.status in [201, 204]
return RequestMetrics(
method='MOVE', duration=duration, status=resp.status,
success=is_success,
filename=dest_filename if is_success else None
)
except Exception as e:
return RequestMetrics(
method='MOVE', duration=time.time() - start, status=0,
success=False, error=str(e)
)
async def benchmark_lock(self, session: aiohttp.ClientSession, filename: str) -> RequestMetrics:
"""Benchmark LOCK request"""
        lock_body = '''<?xml version="1.0" encoding="utf-8"?>
<D:lockinfo xmlns:D="DAV:">
  <D:lockscope><D:exclusive/></D:lockscope>
  <D:locktype><D:write/></D:locktype>
  <D:owner>benchmark</D:owner>
</D:lockinfo>'''
start = time.time()
try:
async with session.request(
'LOCK', f"{self.url}/{filename}", auth=self.auth, data=lock_body,
headers={'Content-Type': 'application/xml', 'Timeout': 'Second-300'}
) as resp:
lock_token = resp.headers.get('Lock-Token', '').strip('<>')
await resp.read()
duration = time.time() - start
is_success = resp.status == 200
if is_success and lock_token:
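                    # Best-effort UNLOCK so the benchmark does not leave stale locks behind.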
try:
async with session.request(
'UNLOCK', f"{self.url}/{filename}", auth=self.auth,
headers={'Lock-Token': f'<{lock_token}>'}
):
pass
                    except Exception:
pass
return RequestMetrics(
method='LOCK', duration=duration, status=resp.status,
success=is_success
)
except Exception as e:
return RequestMetrics(
method='LOCK', duration=time.time() - start, status=0,
success=False, error=str(e)
)
async def benchmark_delete(self, session: aiohttp.ClientSession, resource_name: str) -> RequestMetrics:
"""Benchmark DELETE request for files or directories"""
start = time.time()
try:
            # Collections created by benchmark_mkcol are named "bench_dir_*";
            # some servers require a trailing slash when deleting a collection.
            url_path = f"{self.url}/{resource_name}"
            if "dir" in resource_name:
                url_path += "/"
async with session.delete(url_path, auth=self.auth) as resp:
duration = time.time() - start
return RequestMetrics(
method='DELETE', duration=duration, status=resp.status,
                    success=resp.status in (200, 202, 204)
)
except Exception as e:
return RequestMetrics(
method='DELETE', duration=time.time() - start, status=0,
success=False, error=str(e)
)
async def worker(self, worker_id: int, session: aiohttp.ClientSession):
"""Worker coroutine that runs various benchmarks"""
test_files = []
test_dirs = []
# Create an initial test file to ensure other operations can start
metric = await self.benchmark_put(session)
await self.record_metric(metric)
if metric.success and metric.filename:
test_files.append(metric.filename)
while not self.stop_flag:
elapsed = time.time() - self.start_time
if elapsed >= self.duration:
self.stop_flag = True
break
# Weighted random choice
operations = [
'options', 'propfind', 'put', 'get', 'head',
'mkcol', 'proppatch', 'copy', 'move', 'lock', 'delete'
]
            # Weights bias the mix toward PUT, GET, and DELETE so files are
            # continually created, read, and cleaned up.
            weights = [5, 5, 15, 15, 10, 5, 5, 5, 5, 5, 20]
operation = random.choices(operations, weights=weights, k=1)[0]
metric = None
try:
if operation == 'options':
metric = await self.benchmark_options(session)
elif operation == 'propfind':
depth = random.choice([0, 1])
metric = await self.benchmark_propfind(session, depth)
elif operation == 'put':
metric = await self.benchmark_put(session)
if metric.success and metric.filename:
test_files.append(metric.filename)
elif operation == 'get' and test_files:
filename = random.choice(test_files)
metric = await self.benchmark_get(session, filename)
elif operation == 'head' and test_files:
filename = random.choice(test_files)
metric = await self.benchmark_head(session, filename)
elif operation == 'mkcol':
metric = await self.benchmark_mkcol(session)
if metric.success and metric.filename:
test_dirs.append(metric.filename)
elif operation == 'proppatch' and test_files:
filename = random.choice(test_files)
metric = await self.benchmark_proppatch(session, filename)
elif operation == 'copy' and test_files:
filename = random.choice(test_files)
metric = await self.benchmark_copy(session, filename)
if metric.success and metric.filename:
test_files.append(metric.filename)
elif operation == 'move' and len(test_files) > 1:
filename_to_move = test_files.pop(random.randrange(len(test_files)))
metric = await self.benchmark_move(session, filename_to_move)
if metric.success and metric.filename:
test_files.append(metric.filename)
elif operation == 'lock' and test_files:
filename = random.choice(test_files)
metric = await self.benchmark_lock(session, filename)
elif operation == 'delete':
# Randomly delete a file or a directory
                    if test_dirs and random.random() < 0.2:  # 20% chance to delete a directory
dir_to_delete = test_dirs.pop(random.randrange(len(test_dirs)))
metric = await self.benchmark_delete(session, dir_to_delete)
elif len(test_files) > 1:
file_to_delete = test_files.pop(random.randrange(len(test_files)))
metric = await self.benchmark_delete(session, file_to_delete)
if metric:
await self.record_metric(metric)
except Exception as e:
print(f"Worker {worker_id} error: {e}")
            if metric is None:
                await asyncio.sleep(0.01)  # Back off briefly when the chosen operation had no eligible target
async def run(self):
"""Run the benchmark"""
print("="*80)
print("WebDAV Server Concurrent Benchmark")
print("="*80)
print(f"URL: {self.url}")
print(f"Concurrency: {self.concurrency} workers")
print(f"Duration: {self.duration} seconds")
print(f"User: {self.username}")
print("="*80)
print()
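        # Size the connection pool above the worker count so keep-alive reuse
        # never becomes the bottleneck.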
connector = aiohttp.TCPConnector(limit=self.concurrency * 2)
timeout = aiohttp.ClientTimeout(total=30)
async with aiohttp.ClientSession(connector=connector, timeout=timeout) as session:
self.start_time = time.time()
workers = [
asyncio.create_task(self.worker(i, session))
for i in range(self.concurrency)
]
progress_task = asyncio.create_task(self.show_progress())
await asyncio.gather(*workers, return_exceptions=True)
self.stop_flag = True
await progress_task
self.print_results()
async def show_progress(self):
"""Show progress during benchmark"""
while not self.stop_flag:
elapsed = time.time() - self.start_time
if elapsed >= self.duration:
break
total_requests = sum(s.total_requests for s in self.stats.values())
print(f"\rProgress: {elapsed:.1f}s / {self.duration}s | Total Requests: {total_requests}", end='', flush=True)
await asyncio.sleep(0.5)
print()
def print_results(self):
"""Print benchmark results"""
print("\n")
print("="*80)
print("BENCHMARK RESULTS")
print("="*80)
print()
total_duration = time.time() - self.start_time
total_requests = sum(s.total_requests for s in self.stats.values())
total_success = sum(s.successful_requests for s in self.stats.values())
total_failed = total_requests - total_success
success_rate = (total_success / total_requests * 100) if total_requests > 0 else 0
failed_rate = (total_failed / total_requests * 100) if total_requests > 0 else 0
print(f"Total Duration: {total_duration:.2f}s")
print(f"Total Requests: {total_requests:,}")
print(f"Successful: {total_success:,} ({success_rate:.1f}%)")
print(f"Failed: {total_failed:,} ({failed_rate:.1f}%)")
print(f"Overall RPS: {total_requests/total_duration:.2f}")
print()
sorted_stats = sorted(self.stats.values(), key=lambda s: s.total_requests, reverse=True)
print("="*80)
print("PER-METHOD STATISTICS")
print("="*80)
print()
for stats in sorted_stats:
if stats.total_requests == 0:
continue
# Calculate RPS based on total benchmark duration for better comparison
method_rps = stats.total_requests / total_duration
print(f"Method: {stats.method}")
print(f" Requests: {stats.total_requests:>8,}")
print(f" Success Rate: {stats.success_rate:>8.2f}%")
print(f" RPS: {method_rps:>8.2f}")
print(f" Latency (ms):")
print(f" Min: {stats.min_duration*1000:>8.2f}")
print(f" Avg: {stats.avg_duration*1000:>8.2f}")
print(f" P50: {stats.p50_duration*1000:>8.2f}")
print(f" P95: {stats.p95_duration*1000:>8.2f}")
print(f" P99: {stats.p99_duration*1000:>8.2f}")
print(f" Max: {stats.max_duration*1000:>8.2f}")
if stats.failed_requests > 0 and stats.errors:
print(f" Errors:")
for error, count in sorted(stats.errors.items(), key=lambda x: x[1], reverse=True)[:5]:
error_short = error[:60] + '...' if len(error) > 60 else error
print(f" {error_short}: {count}")
print()
print("="*80)
async def main():
"""Main entry point"""
parser = argparse.ArgumentParser(description='WebDAV Server Concurrent Benchmark')
parser.add_argument('url', help='WebDAV server URL (e.g., http://localhost:8080/)')
parser.add_argument('username', help='Username for authentication')
parser.add_argument('password', help='Password for authentication')
parser.add_argument('-c', '--concurrency', type=int, default=50,
help='Number of concurrent workers (default: 50)')
parser.add_argument('-d', '--duration', type=int, default=60,
help='Benchmark duration in seconds (default: 60)')
args = parser.parse_args()
benchmark = WebDAVBenchmark(
url=args.url,
username=args.username,
password=args.password,
concurrency=args.concurrency,
duration=args.duration
)
await benchmark.run()
if __name__ == '__main__':
asyncio.run(main())