-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdocker-compose.gpu.yml
More file actions
49 lines (47 loc) · 1.63 KB
/
docker-compose.gpu.yml
File metadata and controls
49 lines (47 loc) · 1.63 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
# docker-compose.gpu.yml
# Optional overlay for GPU runtime acceleration (NVIDIA)
#
# The ML worker images include CUDA libraries via PyTorch (works on CPU too).
# This overlay enables GPU RUNTIME access for ML workers on systems with NVIDIA GPUs.
# Without this overlay, PyTorch falls back to CPU inference (still functional, just slower).
#
# Usage:
# ./openaudio.sh start dev # Auto-detects GPU and applies if available
# OR manually:
# docker compose -f docker-compose.yml -f docker-compose.override.yml -f docker-compose.gpu.yml up
#
# Note: This file is automatically included by openaudio.sh when NVIDIA GPU is detected.
# On macOS or systems without NVIDIA GPU, this file is not loaded.
#
# Configuration:
# GPU_DEVICE_ID - GPU device to use (default: 0)
# Set in .env file: GPU_DEVICE_ID=1
services:
  # Both ML workers need the identical NVIDIA reservation and runtime
  # environment, so the shared pieces are defined once on the torch worker
  # and reused via YAML anchors (Compose "fragments"). Aliases expand to the
  # exact same structure at parse time — behavior is unchanged.
  celery-torch-worker:
    # Reserve the selected NVIDIA GPU for this container.
    deploy: &nvidia_gpu_deploy
      resources:
        reservations:
          devices:
            - driver: nvidia
              capabilities: [gpu]
              # Quoted so the interpolated device id is always a string.
              device_ids: ['${GPU_DEVICE_ID:-0}']
    # Runtime knobs: expose the chosen GPU to the NVIDIA runtime and CUDA,
    # and tell the application code to run inference on CUDA.
    environment: &nvidia_gpu_env
      - NVIDIA_VISIBLE_DEVICES=${GPU_DEVICE_ID:-0}
      - NVIDIA_DRIVER_CAPABILITIES=compute,utility
      - CUDA_VISIBLE_DEVICES=${GPU_DEVICE_ID:-0}
      - TORCH_DEVICE=cuda
      - USE_GPU=true

  celery-sam-worker:
    # Same GPU reservation and environment as the torch worker.
    deploy: *nvidia_gpu_deploy
    environment: *nvidia_gpu_env