Create docker-compose.yml
docker-compose.yml  ADDED  +165 -0
@@ -0,0 +1,165 @@
version: '3.8'

services:
  helion-inference:
    build:
      context: .
      dockerfile: Dockerfile
      args:
        - CUDA_VERSION=12.1.1
    image: helion:2.5-rnd
    container_name: helion-inference-server

    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]

    environment:
      - MODEL_PATH=/models/helion
      - MODEL_NAME=DeepXR/Helion-2.5-Rnd
      - PORT=8000
      - HOST=0.0.0.0
      - TENSOR_PARALLEL_SIZE=2
      - MAX_MODEL_LEN=131072
      - GPU_MEMORY_UTILIZATION=0.95
      - CUDA_VISIBLE_DEVICES=0,1
      - WORKERS=1

      # Optional: HuggingFace token for private models
      # - HF_TOKEN=your_token_here

      # Logging
      - LOG_LEVEL=info
      - LOG_FILE=/app/logs/helion.log

    volumes:
      - ./models:/models:ro
      - ./logs:/app/logs
      - ./cache:/app/cache
      - ./inference:/app/inference:ro
      - ./model_config.yaml:/app/model_config.yaml:ro

    ports:
      - "8000:8000"
      - "8001:8001"  # Metrics
      - "8002:8002"  # Admin

    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 120s

    restart: unless-stopped

    networks:
      - helion-network

    logging:
      driver: "json-file"
      options:
        max-size: "100m"
        max-file: "10"

  prometheus:
    image: prom/prometheus:latest
    container_name: helion-prometheus

    volumes:
      - ./monitoring/prometheus.yml:/etc/prometheus/prometheus.yml:ro
      - prometheus-data:/prometheus

    ports:
      - "9090:9090"

    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/usr/share/prometheus/console_libraries'
      - '--web.console.templates=/usr/share/prometheus/consoles'

    networks:
      - helion-network

    restart: unless-stopped

  grafana:
    image: grafana/grafana:latest
    container_name: helion-grafana

    environment:
      - GF_SECURITY_ADMIN_PASSWORD=admin
      - GF_USERS_ALLOW_SIGN_UP=false

    volumes:
      - grafana-data:/var/lib/grafana
      - ./monitoring/grafana/dashboards:/etc/grafana/provisioning/dashboards:ro
      - ./monitoring/grafana/datasources:/etc/grafana/provisioning/datasources:ro

    ports:
      - "3000:3000"

    networks:
      - helion-network

    depends_on:
      - prometheus

    restart: unless-stopped

  nginx:
    image: nginx:alpine
    container_name: helion-nginx

    volumes:
      - ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro
      - ./nginx/ssl:/etc/nginx/ssl:ro

    ports:
      - "80:80"
      - "443:443"

    networks:
      - helion-network

    depends_on:
      - helion-inference

    restart: unless-stopped

  redis:
    image: redis:7-alpine
    container_name: helion-redis

    command: redis-server --appendonly yes

    volumes:
      - redis-data:/data

    ports:
      - "6379:6379"

    networks:
      - helion-network

    restart: unless-stopped

networks:
  helion-network:
    driver: bridge
    ipam:
      config:
        - subnet: 172.28.0.0/16

volumes:
  prometheus-data:
    driver: local
  grafana-data:
    driver: local
  redis-data:
    driver: local
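
The HF_TOKEN entry above is left commented out. One way to supply it without editing the committed file is a local Compose override, sketched below under the assumption that a docker-compose.override.yml sits next to this file (Compose merges it automatically) and that the token comes from the shell environment or an .env file:

# docker-compose.override.yml -- hypothetical local file, not part of this commit
services:
  helion-inference:
    environment:
      # ${HF_TOKEN} is substituted by Compose from the host environment or .env
      - HF_TOKEN=${HF_TOKEN}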
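
The Prometheus service mounts ./monitoring/prometheus.yml, which is referenced but not included in this commit. A minimal sketch of that file, assuming the inference server publishes Prometheus metrics on the 8001 port labelled "Metrics" above (job name, scrape interval, and target port are illustrative):

# monitoring/prometheus.yml -- minimal sketch; the scrape target is an assumption
global:
  scrape_interval: 15s

scrape_configs:
  - job_name: helion-inference
    static_configs:
      # service-name DNS works because both containers share helion-network
      - targets: ['helion-inference:8001']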
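
Likewise, Grafana expects provisioning files under ./monitoring/grafana/. A minimal datasource definition pointing at the Prometheus container might look like this (the file name is illustrative):

# monitoring/grafana/datasources/prometheus.yml -- minimal sketch
apiVersion: 1
datasources:
  - name: Prometheus
    type: prometheus
    access: proxy
    url: http://prometheus:9090   # resolved via the shared helion-network
    isDefault: true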
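
With those files in place, the stack is typically started with docker compose up -d. The GPU reservation requires the NVIDIA Container Toolkit on the host, and the TENSOR_PARALLEL_SIZE=2 / CUDA_VISIBLE_DEVICES=0,1 settings assume at least two visible GPUs.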