# docker-compose.yml — 63 lines (53 loc) · 1.48 KB
---
# Compose file for the userturn-lora training image.
# NOTE(review): `version` is obsolete in Compose v2 (ignored with a warning);
# kept here only for compatibility with older docker-compose v1 tooling.
version: '3.8'

# Shared NVIDIA GPU reservation, reused by both services via a YAML anchor.
# (Compose expands anchors; `x-` keys are ignored as extension fields.)
x-gpu-deploy: &gpu-deploy
  resources:
    reservations:
      devices:
        - driver: nvidia
          count: all
          capabilities: [gpu]

services:
  # Full training run with the default model.
  userturn-lora:
    build: .
    image: userturn-lora:latest
    container_name: userturn-lora
    # GPU support
    deploy: *gpu-deploy
    # Mount volumes for data persistence
    volumes:
      - ./output:/app/output
      - ./data:/app/data
      # Share the host HF cache so model weights are not re-downloaded.
      - ~/.cache/huggingface:/root/.cache/huggingface
    # Environment variables (HF_TOKEN / WANDB_API_KEY come from the host env
    # or a .env file next to this compose file).
    environment:
      - HF_TOKEN=${HF_TOKEN}
      - WANDB_API_KEY=${WANDB_API_KEY}
      - HF_HUB_ENABLE_HF_TRANSFER=1
      - PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True,max_split_size_mb:64
    # Default command (args appended to the image ENTRYPOINT)
    command: ["--model", "Qwen/Qwen2.5-3B-Instruct"]

  # Quick test run with smaller dataset using fastest model
  userturn-lora-test:
    build: .
    image: userturn-lora:latest
    container_name: userturn-lora-test
    deploy: *gpu-deploy
    volumes:
      - ./output:/app/output
      - ~/.cache/huggingface:/root/.cache/huggingface
    environment:
      - HF_TOKEN=${HF_TOKEN}
      - WANDB_API_KEY=${WANDB_API_KEY}
      - HF_HUB_ENABLE_HF_TRANSFER=1
    # Tiny run: 100 train / 10 eval samples, 1 epoch, no W&B logging.
    command:
      - "--model"
      - "LiquidAI/LFM2.5-1.2B-Instruct"
      - "--train-samples"
      - "100"
      - "--eval-samples"
      - "10"
      - "--epochs"
      - "1"
      - "--no-wandb"