
Orchestrating Microservices with Docker

Introduction

In this tutorial you will learn how to build and orchestrate a scalable microservices architecture with Docker. We cover advanced concepts such as service discovery, load balancing, and health checks.

Prerequisites

  • Basic knowledge of Docker and Docker Compose
  • Understanding of networking fundamentals
  • Experience with REST APIs
  • Node.js or Python skills

Architecture Overview

We will build the following services:

┌─────────────┐
│   Nginx     │ ← Load Balancer / Reverse Proxy
└──────┬──────┘

   ┌───┴────┬────────┬────────┐
   │        │        │        │
┌──▼──┐  ┌──▼──┐  ┌──▼──┐  ┌──▼──┐
│ API │  │ API │  │Auth │  │Cache│
│  1  │  │  2  │  │     │  │Redis│
└─────┘  └─────┘  └──┬──┘  └─────┘

                  ┌──▼──┐
                  │ DB  │
                  │PgSQL│
                  └─────┘

Docker Compose Setup

docker-compose.yml

version: '3.8'

services:
  # Nginx Load Balancer
  nginx:
    image: nginx:alpine
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro
      - ./nginx/ssl:/etc/nginx/ssl:ro
    depends_on:
      - api-service-1
      - api-service-2
      - auth-service
    networks:
      - frontend
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost/health"]
      interval: 30s
      timeout: 10s
      retries: 3

  # API service (scalable)
  api-service-1:
    build:
      context: ./api-service
      dockerfile: Dockerfile
    environment:
      - NODE_ENV=production
      - DATABASE_URL=postgresql://user:pass@db:5432/mydb
      - REDIS_URL=redis://cache:6379
      - SERVICE_NAME=api-service-1
    depends_on:
      db:
        condition: service_healthy
      cache:
        condition: service_started
    networks:
      - frontend
      - backend
    restart: unless-stopped
    deploy:
      resources:
        limits:
          cpus: '0.50'
          memory: 512M
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3000/health"]
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 40s

  api-service-2:
    build:
      context: ./api-service
      dockerfile: Dockerfile
    environment:
      - NODE_ENV=production
      - DATABASE_URL=postgresql://user:pass@db:5432/mydb
      - REDIS_URL=redis://cache:6379
      - SERVICE_NAME=api-service-2
    depends_on:
      db:
        condition: service_healthy
      cache:
        condition: service_started
    networks:
      - frontend
      - backend
    restart: unless-stopped
    deploy:
      resources:
        limits:
          cpus: '0.50'
          memory: 512M
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3000/health"]
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 40s

  # Auth Service
  auth-service:
    build:
      context: ./auth-service
      dockerfile: Dockerfile
    environment:
      - NODE_ENV=production
      - DATABASE_URL=postgresql://user:pass@db:5432/mydb
      - JWT_SECRET=${JWT_SECRET}
    depends_on:
      db:
        condition: service_healthy
    networks:
      - frontend
      - backend
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:4000/health"]
      interval: 10s
      timeout: 5s
      retries: 3

  # PostgreSQL Database
  db:
    image: postgres:15-alpine
    environment:
      - POSTGRES_USER=user
      - POSTGRES_PASSWORD=pass
      - POSTGRES_DB=mydb
    volumes:
      - postgres_data:/var/lib/postgresql/data
      - ./db/init:/docker-entrypoint-initdb.d:ro
    networks:
      - backend
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U user -d mydb"]
      interval: 10s
      timeout: 5s
      retries: 5

  # Redis Cache
  cache:
    image: redis:7-alpine
    command: redis-server --appendonly yes
    volumes:
      - redis_data:/data
    networks:
      - backend
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 3s
      retries: 3

networks:
  frontend:
    driver: bridge
  backend:
    driver: bridge
    internal: true  # the backend network is isolated from the internet

volumes:
  postgres_data:
  redis_data:
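
To start the stack and verify that the health checks pass, the standard Compose commands are enough (service names as defined in the file above):

# Build the images and start everything in the background
docker-compose up -d --build

# The status column shows the result of each health check
docker-compose ps

# Inspect the health status of a single service directly
docker inspect --format '{{.State.Health.Status}}' $(docker-compose ps -q db)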

Nginx Load Balancer Configuration

nginx/nginx.conf

# This file replaces /etc/nginx/nginx.conf, so it needs the events and http contexts
events {}

http {
    upstream api_backend {
        least_conn;  # load-balancing method: fewest active connections
        server api-service-1:3000 max_fails=3 fail_timeout=30s;
        server api-service-2:3000 max_fails=3 fail_timeout=30s;
    }

    upstream auth_backend {
        server auth-service:4000 max_fails=3 fail_timeout=30s;
    }

    server {
        listen 80;
        server_name localhost;

        # Health check endpoint
        location /health {
            access_log off;
            return 200 "healthy\n";
            add_header Content-Type text/plain;
        }

        # API routes
        location /api/ {
            proxy_pass http://api_backend;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;

            # Timeouts
            proxy_connect_timeout 5s;
            proxy_send_timeout 60s;
            proxy_read_timeout 60s;

            # Passive failover: retry the next upstream on errors
            proxy_next_upstream error timeout invalid_header http_500 http_502 http_503;
        }

        # Auth routes
        location /auth/ {
            proxy_pass http://auth_backend;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        }
    }
}
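
nginx only reads this file at startup, so it is worth validating the syntax and reloading after every change. A quick sketch against the running container:

# Check the configuration syntax inside the container
docker-compose exec nginx nginx -t

# Apply changes without dropping existing connections
docker-compose exec nginx nginx -s reload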

API Service with Health Check

api-service/server.js

const express = require('express');
const Redis = require('ioredis');
const { Pool } = require('pg');

const app = express();
const port = 3000;

// Database Pool
const pool = new Pool({
  connectionString: process.env.DATABASE_URL,
  max: 20,
  idleTimeoutMillis: 30000,
  connectionTimeoutMillis: 2000,
});

// Redis Client
const redis = new Redis(process.env.REDIS_URL, {
  retryStrategy: (times) => Math.min(times * 50, 2000),
});

// Health Check Endpoint
app.get('/health', async (req, res) => {
  try {
    // Check Database
    await pool.query('SELECT 1');

    // Check Redis
    await redis.ping();

    res.status(200).json({
      status: 'healthy',
      service: process.env.SERVICE_NAME,
      timestamp: new Date().toISOString(),
      uptime: process.uptime()
    });
  } catch (error) {
    res.status(503).json({
      status: 'unhealthy',
      error: error.message
    });
  }
});

const server = app.listen(port, () => {
  console.log(`${process.env.SERVICE_NAME} listening on port ${port}`);
});

// Graceful shutdown: stop accepting new connections, finish in-flight
// requests, then close the database pool and the Redis connection
process.on('SIGTERM', () => {
  console.log('SIGTERM received, closing connections...');
  server.close(async () => {
    await pool.end();
    await redis.quit();
    process.exit(0);
  });
});
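
To see the SIGTERM handler in action, stop one instance and check its logs; docker-compose stop sends SIGTERM first and only kills the container after the timeout:

# Stop with a 30-second grace period (SIGTERM, then SIGKILL)
docker-compose stop -t 30 api-service-1

# The shutdown message should appear before the container exits
docker-compose logs --tail=20 api-service-1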

Scaling and Deployment

Scaling Services

# Scale the API services to 5 instances in total (3 + 2)
docker-compose up -d --scale api-service-1=3 --scale api-service-2=2

# Check the status
docker-compose ps

# View the logs
docker-compose logs -f api-service-1
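
Note that nginx resolves the upstream hostnames only when its configuration is loaded, so newly started replicas are not picked up automatically. A reload makes nginx re-resolve the service names:

# Re-resolve api-service-1 / api-service-2 after scaling
docker-compose exec nginx nginx -s reload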

Rolling Updates

# Update a service without downtime
docker-compose up -d --no-deps --build api-service-1

# Wait until the service reports healthy again (start_period is 40s)
until [ "$(docker inspect --format '{{.State.Health.Status}}' "$(docker-compose ps -q api-service-1)")" = "healthy" ]; do
  sleep 5
done

# Then update the next service
docker-compose up -d --no-deps --build api-service-2

Monitoring and Logging

Centralized Logging with the ELK Stack

Extend docker-compose.yml, and also add the monitoring network and the es_data volume to the existing top-level networks and volumes sections:

  # Elasticsearch
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:8.11.0
    environment:
      - discovery.type=single-node
      - xpack.security.enabled=false  # demo only: disables authentication/TLS
    volumes:
      - es_data:/usr/share/elasticsearch/data
    networks:
      - monitoring

  # Logstash
  logstash:
    image: docker.elastic.co/logstash/logstash:8.11.0
    volumes:
      - ./logstash/pipeline:/usr/share/logstash/pipeline
    networks:
      - monitoring
    depends_on:
      - elasticsearch

  # Kibana
  kibana:
    image: docker.elastic.co/kibana/kibana:8.11.0
    ports:
      - "5601:5601"
    networks:
      - monitoring
    depends_on:
      - elasticsearch

Best Practices

1. Security

# Use Docker secrets for sensitive data
secrets:
  db_password:
    file: ./secrets/db_password.txt

services:
  db:
    environment:
      # the official postgres image reads the password from this file
      - POSTGRES_PASSWORD_FILE=/run/secrets/db_password
    secrets:
      - db_password
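
The secret file must exist before the stack starts. A minimal sketch (path as referenced above; keep the directory out of version control, and the openssl call is just one way to generate a value):

mkdir -p secrets
openssl rand -base64 32 > secrets/db_password.txt   # generate a random password
chmod 600 secrets/db_password.txt
echo 'secrets/' >> .gitignore                        # never commit the secret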

2. Resource Limits

Always set resource limits to avoid noisy neighbors.
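
Whether the limits from the deploy section were actually applied can be checked with docker inspect (0 means unlimited; the service name matches the compose file above):

# Memory limit in bytes and CPU limit in nano-CPUs
docker inspect --format '{{.HostConfig.Memory}} {{.HostConfig.NanoCpus}}' $(docker-compose ps -q api-service-1)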

3. Health Checks

Implement meaningful health checks for every service.

4. Network Segmentation

Isolate backend services from the public network.
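
A quick way to confirm the isolation is an outbound request from a container on the internal backend network; with internal: true it should fail (sketch assumes the alpine images, which ship busybox wget):

# Expected to fail: the backend network has no route to the internet
docker-compose exec db wget -qO- -T 3 http://example.com || echo "backend is isolated"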

5. Graceful Shutdown

Implement SIGTERM handlers for clean shutdowns.

Troubleshooting

# Inspect the backend network (the prefix is the Compose project name)
docker network inspect coding-is-love_backend

# Service Logs
docker-compose logs -f --tail=100 api-service-1

# Open a shell inside a container
docker-compose exec api-service-1 sh

# Resource Usage
docker stats
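
If services cannot reach each other, checking Docker's built-in DNS from inside the network usually narrows the problem down (sketch assumes the alpine-based nginx image, which ships busybox nslookup):

# Resolve a service name via Docker's embedded DNS
docker-compose exec nginx nslookup api-service-1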

Conclusion

A well-orchestrated microservices architecture requires:

  • Well-designed network segmentation
  • Robust health checks
  • Effective load balancing
  • Centralized logging and monitoring
  • Graceful degradation

Further Topics

  • Migrating to Kubernetes
  • Service Mesh (Istio, Linkerd)
  • Distributed Tracing (Jaeger)
  • Auto-scaling strategies