What is a reverse proxy?
A reverse proxy accepts client requests and forwards them to one or more backend servers. In contrast to a forward proxy (which shields clients), a reverse proxy shields servers.
Typical use cases:
- SSL termination
- Load balancing
- Caching
- Compression
- Security (rate limiting, WAF)
Installation
Ubuntu/Debian
sudo apt update
sudo apt install nginx
sudo systemctl start nginx
sudo systemctl enable nginx
RHEL/CentOS
sudo yum install nginx
sudo systemctl start nginx
sudo systemctl enable nginx
Verify
nginx -v
curl localhost # should show the Nginx welcome page
Basic configuration
Simple reverse proxy
# /etc/nginx/sites-available/myapp
server {
    listen 80;
    server_name example.com;

    location / {
        proxy_pass http://localhost:3000;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}
Enable it:
sudo ln -s /etc/nginx/sites-available/myapp /etc/nginx/sites-enabled/
sudo nginx -t # test the configuration
sudo systemctl reload nginx
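If a backend is already listening on port 3000, a quick smoke test from the host shows whether the proxy forwards requests; forcing the Host header stands in for a DNS entry that may not exist yet:

# request through Nginx, forcing the configured virtual host
curl -i -H "Host: example.com" http://127.0.0.1/

# for comparison: the backend answered directly
curl -i http://127.0.0.1:3000/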
Important proxy headers
# preserve the original Host header
proxy_set_header Host $host;
# pass on the real client IP
proxy_set_header X-Real-IP $remote_addr;
# IP chain across proxies
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# original protocol (http/https)
proxy_set_header X-Forwarded-Proto $scheme;
# WebSocket support
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
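To see which of these headers actually reach the backend, one option is to swap the real app for a throwaway netcat listener for a moment and watch the proxied request arrive. This is only a sketch: it assumes the proxy from the previous section points at port 3000, and netcat flag syntax differs between the OpenBSD and GNU variants.

# terminal 1: stop whatever listens on port 3000, then start a throwaway listener
# (OpenBSD netcat syntax; GNU netcat needs: nc -l -p 3000)
nc -l 3000

# terminal 2: send a request through Nginx; the listener prints the proxied request
# including the Host, X-Real-IP, X-Forwarded-For and X-Forwarded-Proto headers set above
curl -s --max-time 2 -H "Host: example.com" http://127.0.0.1/ > /dev/null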
SSL termination with Let’s Encrypt
Install Certbot
sudo apt install certbot python3-certbot-nginx
Obtain an SSL certificate
sudo certbot --nginx -d example.com -d www.example.com
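Once Certbot has finished, the served certificate and the HTTP-to-HTTPS redirect can be verified directly from the shell (example.com stands in for the real domain):

# inspect the served certificate, its subject and expiry dates
echo | openssl s_client -connect example.com:443 -servername example.com 2>/dev/null | openssl x509 -noout -subject -dates

# the plain-HTTP request should answer with a 301 to https://
curl -I http://example.com/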
Resulting configuration
server {
    listen 80;
    server_name example.com www.example.com;
    return 301 https://$host$request_uri; # $host keeps the requested name (www or apex)
}

server {
    listen 443 ssl http2;
    server_name example.com www.example.com;

    ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem;

    # SSL configuration
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_ciphers HIGH:!aNULL:!MD5;
    ssl_prefer_server_ciphers on;

    location / {
        proxy_pass http://localhost:3000;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto https;
    }
}
Auto-Renewal
# Test renewal
sudo certbot renew --dry-run
# the renewal timer is already set up by Certbot
sudo systemctl status certbot.timer
Load Balancing
Upstream definition
upstream backend {
    # Round Robin (default)
    server 10.0.0.1:3000;
    server 10.0.0.2:3000;
    server 10.0.0.3:3000;
}

server {
    listen 80;
    server_name example.com;

    location / {
        proxy_pass http://backend;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
    }
}
Load balancing strategies
Least connections
upstream backend {
    least_conn;
    server 10.0.0.1:3000;
    server 10.0.0.2:3000;
}
IP Hash (Session Persistence)
upstream backend {
    ip_hash;
    server 10.0.0.1:3000;
    server 10.0.0.2:3000;
}
Weighted Round Robin
upstream backend {
    server 10.0.0.1:3000 weight=3;
    server 10.0.0.2:3000 weight=1; # receives 25% of the requests
}
Health Checks
upstream backend {
    server 10.0.0.1:3000 max_fails=3 fail_timeout=30s;
    server 10.0.0.2:3000 max_fails=3 fail_timeout=30s;
    server 10.0.0.3:3000 backup; # only used when the primaries are down
}
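These passive health checks are easy to observe: stop one backend and Nginx will, after max_fails failed attempts, take that peer out of rotation for fail_timeout while requests continue to be served by the others. The sketch below assumes the backend runs as a systemd unit, here called app.service as a placeholder:

# on one backend host: stop the application (app.service is a placeholder name)
sudo systemctl stop app.service

# on the proxy: connection failures to that peer show up in the error log,
# while client requests keep succeeding via the remaining servers
sudo tail -f /var/log/nginx/error.log
curl -I http://example.com/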
Caching
Configuring the proxy cache
# define the cache zone (in the http block)
proxy_cache_path /var/cache/nginx/proxy
                 levels=1:2
                 keys_zone=app_cache:10m
                 max_size=1g
                 inactive=60m;

server {
    listen 80;
    server_name example.com;

    location / {
        proxy_cache app_cache;
        proxy_cache_valid 200 60m;
        proxy_cache_valid 404 10m;
        proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
        proxy_cache_bypass $http_cache_control;
        add_header X-Cache-Status $upstream_cache_status;
        proxy_pass http://localhost:3000;
    }

    location ~* \.(jpg|jpeg|png|gif|ico|css|js)$ {
        proxy_cache app_cache;
        proxy_cache_valid 200 1d;
        proxy_pass http://localhost:3000;
    }
}
Create the cache directory
sudo mkdir -p /var/cache/nginx/proxy
# on RHEL/CentOS the worker user is nginx; on Debian/Ubuntu use www-data instead
sudo chown -R nginx:nginx /var/cache/nginx/proxy
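Whether caching works can be read off the X-Cache-Status header added in the configuration above: the first request should report MISS, a repeat of the same request within the validity window HIT:

# first request: expect X-Cache-Status: MISS
curl -s -D - -o /dev/null http://example.com/ | grep -i x-cache-status

# repeat within the validity window: expect X-Cache-Status: HIT
curl -s -D - -o /dev/null http://example.com/ | grep -i x-cache-status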
Cache bypass for specific requests
location /api/ {
    proxy_cache app_cache;
    proxy_cache_bypass $cookie_nocache $arg_nocache;
    proxy_no_cache $cookie_nocache;
    proxy_pass http://localhost:3000;
}
Performance optimizations
Compression
gzip on;
gzip_vary on;
gzip_min_length 1024;
gzip_types text/plain text/css text/xml text/javascript
application/x-javascript application/xml+rss
application/json application/javascript;
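The effect is easiest to verify with curl, since Nginx only compresses when the client advertises gzip support, the response exceeds gzip_min_length, and its MIME type is covered by gzip_types (text/html is always included):

# with compression advertised, the answer should carry Content-Encoding: gzip
curl -s -D - -o /dev/null -H "Accept-Encoding: gzip" http://example.com/ | grep -i content-encoding

# without the header Nginx serves the response uncompressed (no Content-Encoding line)
curl -s -D - -o /dev/null http://example.com/ | grep -i content-encoding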
Connection Pooling
upstream backend {
    server 10.0.0.1:3000;
    keepalive 32; # number of idle keepalive connections kept per worker
}

location / {
    proxy_pass http://backend;
    proxy_http_version 1.1;
    proxy_set_header Connection "";
}
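Whether upstream connections are actually reused can be observed with ss on the proxy host; with keepalive enabled, established connections to the backend port stay open between requests instead of being torn down after each one. The port 3000 below matches the examples in this guide:

# established connections from Nginx workers to the backend port
sudo ss -tnp | grep ':3000'

# send a handful of requests, then look again; recurring source ports indicate reuse
for i in 1 2 3 4 5; do curl -s -o /dev/null http://example.com/; done
sudo ss -tnp | grep ':3000'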
Buffer configuration
proxy_buffering on;
proxy_buffer_size 4k;
proxy_buffers 8 4k;
proxy_busy_buffers_size 8k;
Security
Rate Limiting
# define the limit zone (in the http block)
limit_req_zone $binary_remote_addr zone=api_limit:10m rate=10r/s;

server {
    location /api/ {
        limit_req zone=api_limit burst=20 nodelay;
        proxy_pass http://localhost:3000;
    }
}
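The limit is easy to provoke: a tight request loop that exceeds 10 r/s plus the burst of 20 makes Nginx answer the excess requests with 503, the default limit_req_status (assuming the loop actually runs faster than the configured rate):

# fire requests in a tight loop and print only the status codes;
# once the burst allowance of 20 is used up, 503 responses appear
for i in $(seq 1 40); do
    curl -s -o /dev/null -w '%{http_code} ' http://example.com/api/
done; echo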
Request Size Limits
client_max_body_size 10M;
client_body_buffer_size 128k;
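Requests whose body exceeds client_max_body_size are rejected by Nginx itself with 413 Request Entity Too Large before they ever reach the backend. A throwaway file makes that visible; the /upload path below is just a placeholder for any POST endpoint of the app:

# create a ~20 MB dummy file and POST it; /upload is a placeholder path
head -c 20000000 /dev/zero > /tmp/big.bin
curl -s -o /dev/null -w '%{http_code}\n' -X POST --data-binary @/tmp/big.bin http://example.com/upload
# expected output: 413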
Timeout configuration
proxy_connect_timeout 60s;
proxy_send_timeout 60s;
proxy_read_timeout 60s;
IP blacklisting
# /etc/nginx/conf.d/blacklist.conf
deny 192.168.1.100;
deny 10.0.0.0/8;
allow all;
Multiple Applications
server {
    listen 80;
    server_name example.com;

    # frontend (React/Vue/etc.)
    location / {
        proxy_pass http://localhost:3000;
        proxy_set_header Host $host;
    }

    # backend API
    location /api/ {
        proxy_pass http://localhost:8080;
        proxy_set_header Host $host;
    }

    # WebSocket service
    location /ws/ {
        proxy_pass http://localhost:9000;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }

    # static assets (served directly by Nginx)
    location /static/ {
        alias /var/www/static/;
        expires 1y;
        add_header Cache-Control "public, immutable";
    }
}
Monitoring & Debugging
Access log with a custom format
log_format custom '$remote_addr - $remote_user [$time_local] '
'"$request" $status $body_bytes_sent '
'"$http_referer" "$http_user_agent" '
'upstream: $upstream_addr '
'response_time: $upstream_response_time';
access_log /var/log/nginx/access.log custom;
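With this format, slow upstream responses become greppable; for example, the largest recorded response times can be pulled straight out of the access log (the sketch relies on GNU grep's -P option):

# print the ten largest upstream response times recorded in the access log
sudo grep -oP 'response_time: \K[0-9.]+' /var/log/nginx/access.log | sort -rn | head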
Enable the status page
server {
    listen 8080;
    server_name localhost;

    location /nginx_status {
        stub_status on;
        access_log off;
        allow 127.0.0.1;
        deny all;
    }
}
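The page is only reachable from localhost and returns a short plain-text report; the numbers in the sample output below are purely illustrative:

curl http://127.0.0.1:8080/nginx_status
# Active connections: 3
# server accepts handled requests
#  112 112 245
# Reading: 0 Writing: 1 Waiting: 2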
Debugging
# raise the error log level
error_log /var/log/nginx/error.log debug;

# request debugging
location / {
    add_header X-Debug-Upstream $upstream_addr;
    add_header X-Debug-Status $upstream_status;
    proxy_pass http://backend;
}
Troubleshooting
Test the configuration
sudo nginx -t
Check the logs
sudo tail -f /var/log/nginx/error.log
sudo tail -f /var/log/nginx/access.log
Connection problems
# is the backend reachable?
curl http://localhost:3000
# check the firewall
sudo ufw status
# Nginx processes
ps aux | grep nginx
Permission issues
# SELinux (CentOS/RHEL)
sudo setsebool -P httpd_can_network_connect 1
# file permissions (on Debian/Ubuntu the worker user is www-data instead of nginx)
sudo chown -R nginx:nginx /var/www
Best Practices
- Always test: run nginx -t before every reload
- Monitor the logs: catch errors early
- Use SSL: Let’s Encrypt is free
- Rate limiting: implement DDoS protection
- Use caching: dramatically improves performance
- Health checks: handle outages automatically
- Separate configs: one file per app in sites-available
- Backups: back up /etc/nginx regularly (see the snippet below)
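For the last point, a date-stamped tarball plus a dump of the fully resolved configuration is usually enough as a lightweight backup:

# archive the whole configuration directory with a date stamp
sudo tar czf nginx-conf-$(date +%F).tar.gz /etc/nginx

# additionally dump the fully resolved configuration (all includes expanded)
sudo nginx -T > nginx-full-config-$(date +%F).conf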
Example: production-ready setup
# /etc/nginx/sites-available/production-app
upstream app_backend {
    least_conn;
    server 10.0.0.1:3000 max_fails=3 fail_timeout=30s;
    server 10.0.0.2:3000 max_fails=3 fail_timeout=30s;
    server 10.0.0.3:3000 backup;
    keepalive 32;
}

limit_req_zone $binary_remote_addr zone=app_limit:10m rate=50r/s;
server {
    listen 80;
    server_name example.com www.example.com;
    return 301 https://$host$request_uri;
}
server {
    listen 443 ssl http2;
    server_name example.com www.example.com;

    ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem;
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_ciphers HIGH:!aNULL:!MD5;
    ssl_prefer_server_ciphers on;

    client_max_body_size 10M;

    gzip on;
    gzip_types text/plain text/css application/json application/javascript;

    location / {
        limit_req zone=app_limit burst=100 nodelay;

        proxy_cache app_cache;
        proxy_cache_valid 200 5m;
        proxy_cache_use_stale error timeout updating;

        proxy_pass http://app_backend;
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto https;
        proxy_set_header Connection "";

        proxy_connect_timeout 60s;
        proxy_send_timeout 60s;
        proxy_read_timeout 60s;
    }

    access_log /var/log/nginx/app_access.log custom;
    error_log /var/log/nginx/app_error.log warn;
}
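Note that this file relies on two definitions that must live at the http level outside of sites-available: the app_cache zone (proxy_cache_path) and the custom log_format from the earlier sections. Before going live it is worth confirming both are loaded, then reloading and smoke-testing:

# confirm the cache zone and the custom log format are defined in the loaded configuration
sudo nginx -T | grep -E 'proxy_cache_path|log_format custom'

# validate, reload, and smoke-test end to end
sudo nginx -t && sudo systemctl reload nginx
curl -I https://example.com/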
Nginx is powerful and flexible. With this baseline configuration in place, production deployments get a secure and performant foundation.