"""
|
||
|
|
Gunicorn Configuration for WebDAV Server
|
||
|
|
Production-grade WSGI server configuration with aiohttp worker
|
||
|
|
"""
|
||
|
|
|
||
|
|
import os
|
||
|
|
import multiprocessing
|
||
|
|
|
||
|
|
# ============================================================================
|
||
|
|
# Server Socket
|
||
|
|
# ============================================================================
|
||
|
|
|
||
|
|
bind = f"{os.getenv('HOST', '0.0.0.0')}:{os.getenv('PORT', '8080')}"
|
||
|
|
backlog = 2048
|
||
|
|
|
||
|
|
# ============================================================================
# Worker Processes
# ============================================================================


def _int_env(name, default):
    """Read environment variable *name* as an int, falling back to *default*."""
    return int(os.getenv(name, default))


# Number of worker processes; defaults to the classic (2 * CPUs) + 1 heuristic.
workers = _int_env('WORKERS', multiprocessing.cpu_count() * 2 + 1)

# Worker class - MUST be aiohttp.GunicornWebWorker for aiohttp
worker_class = 'aiohttp.GunicornWebWorker'

# Maximum simultaneous clients per worker.
# NOTE(review): gunicorn documents worker_connections for eventlet/gevent
# workers — confirm the aiohttp worker honours it.
worker_connections = _int_env('WORKER_CONNECTIONS', 1024)

# Recycle each worker after this many requests (guards against slow memory
# leaks); the jitter staggers restarts so workers do not all recycle at once.
max_requests = _int_env('MAX_REQUESTS', 10000)
max_requests_jitter = _int_env('MAX_REQUESTS_JITTER', 1000)

# Seconds of worker silence before the master kills and restarts it.
timeout = _int_env('WORKER_TIMEOUT', 60)

# Seconds a worker gets to finish in-flight requests after a restart signal.
graceful_timeout = _int_env('GRACEFUL_TIMEOUT', 30)

# Seconds to keep an idle Keep-Alive connection open.
keepalive = _int_env('KEEPALIVE_TIMEOUT', 30)
# ============================================================================
# Logging
# ============================================================================

# Access log destination and gunicorn access-log format
# (%(D)s appends the request duration in microseconds).
accesslog = os.getenv('ACCESS_LOG_FILE', './logs/gunicorn_access.log')
access_log_format = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s" %(D)s'

# Error log destination.
errorlog = os.getenv('ERROR_LOG_FILE', './logs/gunicorn_error.log')

# Log level (gunicorn expects lower case).
loglevel = os.getenv('LOG_LEVEL', 'info').lower()

# Redirect worker stdout/stderr into the error log.
capture_output = True


def _rotating_file_handler(filename, formatter):
    """Build a dictConfig entry for a 10 MB x 5 rotating file handler."""
    return {
        'class': 'logging.handlers.RotatingFileHandler',
        'formatter': formatter,
        'filename': filename,
        'maxBytes': 10485760,  # 10MB
        'backupCount': 5,
    }


# dictConfig-style logging with size-based rotation.
# NOTE(review): RotatingFileHandler opens its file when logging is configured —
# on_starting creates ./logs, but confirm hook/log-setup ordering for this
# gunicorn version so the directory exists in time.
logconfig_dict = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'generic': {
            'format': '%(asctime)s [%(process)d] [%(levelname)s] %(message)s',
            'datefmt': '%Y-%m-%d %H:%M:%S',
            'class': 'logging.Formatter',
        },
        'access': {
            'format': '%(message)s',
            'class': 'logging.Formatter',
        },
    },
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'formatter': 'generic',
            'stream': 'ext://sys.stdout',
        },
        'error_file': _rotating_file_handler(errorlog, 'generic'),
        'access_file': _rotating_file_handler(accesslog, 'access'),
    },
    'loggers': {
        'gunicorn.error': {
            'handlers': ['console', 'error_file'],
            'level': loglevel.upper(),
            'propagate': False,
        },
        'gunicorn.access': {
            'handlers': ['access_file'],
            'level': 'INFO',
            'propagate': False,
        },
    },
    'root': {
        'level': loglevel.upper(),
        'handlers': ['console', 'error_file'],
    },
}
# ============================================================================
# Process Naming
# ============================================================================

# Name shown in ps/top for the master and workers.
proc_name = 'webdav_server'

# ============================================================================
# Server Mechanics
# ============================================================================

# Detach and run in the background when DAEMON=true (case-insensitive).
daemon = os.getenv('DAEMON', 'false').lower() == 'true'

# Where the master writes its PID.
pidfile = os.getenv('PID_FILE', './gunicorn.pid')

# User and group to run workers.
# NOTE(review): login shells export USER, so `user` rarely stays None in
# practice and gunicorn will attempt to switch to the current login user —
# consider a dedicated variable (e.g. WEBDAV_USER) if that is unintended.
user = os.getenv('USER', None)
group = os.getenv('GROUP', None)

# Working directory to chdir into before loading the app.
chdir = os.getenv('CHDIR', '.')

# Extra environment variables propagated to workers.
raw_env = [
    'WEBDAV_ENV={}'.format(os.getenv('WEBDAV_ENV', 'production')),
]
# ============================================================================
# Server Hooks
# ============================================================================


def on_starting(server):
    """Called just before the master process is initialized.

    Creates every directory the server needs (log directories, WebDAV root,
    backups) so that later steps — including the rotating log file handlers —
    do not fail on a missing path.
    """
    server.log.info("Starting WebDAV Server...")

    # Fix: previously only the hardcoded ./logs directory was created, so a
    # custom ACCESS_LOG_FILE/ERROR_LOG_FILE in another directory would make
    # the RotatingFileHandler fail. Create the parents of the configured log
    # files too (env vars re-read here to keep the hook self-contained).
    for log_path in (
        os.getenv('ACCESS_LOG_FILE', './logs/gunicorn_access.log'),
        os.getenv('ERROR_LOG_FILE', './logs/gunicorn_error.log'),
    ):
        os.makedirs(os.path.dirname(log_path) or '.', exist_ok=True)

    # Create the remaining runtime directories (original behavior preserved).
    os.makedirs('./logs', exist_ok=True)
    os.makedirs('./webdav', exist_ok=True)
    os.makedirs('./backups', exist_ok=True)
def on_reload(server):
    """Called to recycle workers during a reload (e.g. on SIGHUP)."""
    log = server.log
    log.info("Reloading WebDAV Server...")
def when_ready(server):
    """Called just after the server starts; logs the effective configuration."""
    log = server.log
    log.info(f"WebDAV Server is ready. Listening on: {bind}")
    log.info(f"Workers: {workers}")
    log.info(f"Worker class: {worker_class}")
def pre_fork(server, worker):
    """Called just before a worker is forked. Intentionally a no-op."""
def post_fork(server, worker):
    """Called just after a worker has been forked; logs the new worker PID."""
    message = f"Worker spawned (pid: {worker.pid})"
    server.log.info(message)
def pre_exec(server):
    """Called just before a new master process is forked (re-exec on USR2)."""
    log = server.log
    log.info("Forked child, re-executing.")
def worker_int(worker):
    """Called when a worker receives SIGINT or SIGQUIT."""
    message = f"Worker received INT or QUIT signal (pid: {worker.pid})"
    worker.log.info(message)
def worker_abort(worker):
    """Called when a worker receives SIGABRT (e.g. on a timeout kill)."""
    message = f"Worker received SIGABRT signal (pid: {worker.pid})"
    worker.log.info(message)
def pre_request(worker, req):
    """Called just before a worker processes a request; debug-logs it."""
    line = f"{req.method} {req.path}"
    worker.log.debug(line)
def post_request(worker, req, environ, resp):
    """Called after a worker processes a request. Intentionally a no-op."""
def worker_exit(server, worker):
    """Called just after a worker has exited; logs the departed worker PID."""
    message = f"Worker exited (pid: {worker.pid})"
    server.log.info(message)
def on_exit(server):
    """Called just before Gunicorn exits."""
    log = server.log
    log.info("Shutting down WebDAV Server...")
# ============================================================================
# SSL Configuration (if needed)
# ============================================================================

# Uncomment and configure for SSL support
# keyfile = '/path/to/key.pem'
# certfile = '/path/to/cert.pem'
# ssl_version = ssl.PROTOCOL_TLS_SERVER  # pass an ssl-module constant (requires `import ssl`)
# cert_reqs = 0  # ssl.CERT_NONE
# ca_certs = None
# suppress_ragged_eofs = True
# do_handshake_on_connect = False
# ciphers = 'ECDHE+AESGCM:ECDHE+CHACHA20'  # OpenSSL cipher list — 'TLSv1' is a protocol name, not a cipher suite