train.py
import os
from typing import Callable, Optional
import hydra
import torch
import wandb
from omegaconf import DictConfig, OmegaConf
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
from torch.optim.lr_scheduler import OneCycleLR
from tqdm import tqdm
from data import get_qclevr_dataloaders
from model import Conv2dEIRNN
from utils import AttrDict, seed


def train_iter(
config: AttrDict,
model: Conv2dEIRNN,
optimizer: torch.optim.Optimizer,
scheduler: Optional[torch.optim.lr_scheduler.LRScheduler],
criterion: torch.nn.Module,
train_loader: torch.utils.data.DataLoader,
    wandb_log: Callable[[dict[str, float]], None],
epoch: int,
device: torch.device,
) -> tuple[float, float]:
"""
Perform a single training iteration.
Args:
config (AttrDict): Configuration parameters.
model (Conv2dEIRNN): The model to be trained.
        optimizer (torch.optim.Optimizer): The optimizer used for training.
        scheduler (Optional[torch.optim.lr_scheduler.LRScheduler]): Optional learning rate scheduler, stepped once per batch.
        criterion (torch.nn.Module): The loss function.
        train_loader (torch.utils.data.DataLoader): The training data loader.
        wandb_log (Callable[[dict[str, float]], None]): Function to log training statistics to Weights & Biases.
epoch (int): The current epoch number.
device (torch.device): The device to perform computations on.
Returns:
tuple[float, float]: A tuple containing the training loss and accuracy.
"""
if config.train.grad_clip.disable:
clip_grad_ = lambda x, y: None
elif config.train.grad_clip.type == "norm":
clip_grad_ = lambda x, y: clip_grad_norm_(x, y, foreach=False)
elif config.train.grad_clip.type == "value":
clip_grad_ = lambda x, y: clip_grad_value_(x, y, foreach=False)
    else:
        raise NotImplementedError(
            f"Gradient clipping type {config.train.grad_clip.type} not implemented"
        )
model.train()
train_loss = 0.0
train_correct = 0
train_total = 0
running_loss = 0.0
running_correct = 0
running_total = 0
bar = tqdm(
train_loader,
desc=(f"Training | Epoch: {epoch} | " f"Loss: {0:.4f} | " f"Acc: {0:.2%}"),
disable=not config.tqdm,
)
for i, (cue, mixture, labels) in enumerate(bar):
cue = cue.to(device)
mixture = mixture.to(device)
labels = labels.to(device)
# Forward pass
outputs = model(cue, mixture, all_timesteps=config.criterion.all_timesteps)
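        # With criterion.all_timesteps enabled, the model returns one prediction per
        # timestep; the loss is averaged over timesteps and accuracy is scored on the
        # final timestep's output.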
if config.criterion.all_timesteps:
losses = []
for output in outputs:
losses.append(criterion(output, labels))
loss = sum(losses) / len(losses)
outputs = outputs[-1]
else:
loss = criterion(outputs, labels)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
clip_grad_(model.parameters(), config.train.grad_clip.value)
optimizer.step()
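        # Per-step schedulers such as OneCycleLR are advanced once per batch, matching
        # total_steps = epochs * len(train_loader) configured in train() below.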
if scheduler is not None:
scheduler.step()
# Update statistics
train_loss += loss.item()
running_loss += loss.item()
predicted = outputs.argmax(-1)
correct = (predicted == labels).sum().item()
train_correct += correct
running_correct += correct
train_total += len(labels)
running_total += len(labels)
# Log statistics
if (i + 1) % config.train.log_freq == 0:
running_loss /= config.train.log_freq
running_acc = running_correct / running_total
wandb_log(dict(running_loss=running_loss, running_acc=running_acc))
bar.set_description(
f"Training | Epoch: {epoch} | "
f"Loss: {running_loss:.4f} | "
f"Acc: {running_acc:.2%}"
)
running_loss = 0
running_correct = 0
running_total = 0
# Calculate average training loss and accuracy
train_loss /= len(train_loader)
train_acc = train_correct / train_total
wandb_log(dict(train_loss=train_loss, train_acc=train_acc))
    return train_loss, train_acc


def eval_iter(
config: AttrDict,
model: Conv2dEIRNN,
criterion: torch.nn.Module,
val_loader: torch.utils.data.DataLoader,
    wandb_log: Callable[[dict[str, float]], None],
epoch: int,
device: torch.device,
) -> tuple[float, float]:
"""
Perform a single evaluation iteration.
    Args:
        config (AttrDict): Configuration parameters.
        model (Conv2dEIRNN): The model to be evaluated.
        criterion (torch.nn.Module): The loss function.
        val_loader (torch.utils.data.DataLoader): The validation data loader.
        wandb_log (Callable[[dict[str, float]], None]): Function to log evaluation statistics to Weights & Biases.
epoch (int): The current epoch number.
device (torch.device): The device to perform computations on.
    Returns:
        tuple[float, float]: A tuple containing the evaluation loss and accuracy.
"""
model.eval()
test_loss = 0.0
test_correct = 0
test_total = 0
with torch.no_grad():
for cue, mixture, labels in val_loader:
cue = cue.to(device)
mixture = mixture.to(device)
labels = labels.to(device)
# Forward pass
outputs = model(cue, mixture, all_timesteps=config.criterion.all_timesteps)
if config.criterion.all_timesteps:
losses = []
for output in outputs:
losses.append(criterion(output, labels))
loss = sum(losses) / len(losses)
outputs = outputs[-1]
else:
loss = criterion(outputs, labels)
# Update statistics
test_loss += loss.item()
predicted = outputs.argmax(-1)
correct = (predicted == labels).sum().item()
test_correct += correct
test_total += len(labels)
# Calculate average test loss and accuracy
test_loss /= len(val_loader)
test_acc = test_correct / test_total
wandb_log(dict(test_loss=test_loss, test_acc=test_acc, epoch=epoch))
    return test_loss, test_acc


@hydra.main(
version_base=None,
config_path="config",
config_name="config",
)
def train(config: DictConfig) -> None:
"""
Train the model using the provided configuration.
Args:
config (dict): Configuration parameters.
"""
config = OmegaConf.to_container(config, resolve=True)
config = AttrDict(config)
# Set the random seed
if config.seed is not None:
seed(config.seed)
# Set the matmul precision
torch.set_float32_matmul_precision(config.train.matmul_precision)
# Get device and initialize the model
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = Conv2dEIRNN(**config.model).to(device)
# Compile the model if requested
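    # torch.compile wraps the module in an OptimizedModule (disable=True makes it a
    # no-op); the checkpointing code below unwraps `_orig_mod` so that saved state
    # dicts keep the original parameter names.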
model = torch.compile(
model,
fullgraph=config.compile.fullgraph,
dynamic=config.compile.dynamic,
backend=config.compile.backend,
mode=config.compile.mode,
disable=config.compile.disable,
)
# Initialize the optimizer
if config.optimizer.fn == "sgd":
optimizer = torch.optim.SGD(
model.parameters(),
lr=config.optimizer.lr,
momentum=config.optimizer.momentum,
)
elif config.optimizer.fn == "adam":
optimizer = torch.optim.Adam(
model.parameters(),
lr=config.optimizer.lr,
betas=(config.optimizer.beta1, config.optimizer.beta2),
)
elif config.optimizer.fn == "adamw":
optimizer = torch.optim.AdamW(
model.parameters(),
lr=config.optimizer.lr,
betas=(config.optimizer.beta1, config.optimizer.beta2),
)
else:
raise NotImplementedError(f"Optimizer {config.optimizer.fn} not implemented")
# Initialize the loss function
if config.criterion.fn == "ce":
criterion = torch.nn.CrossEntropyLoss()
else:
raise NotImplementedError(f"Criterion {config.criterion.fn} not implemented")
# Get the data loaders
train_loader, val_loader = get_qclevr_dataloaders(
data_root=config.data.root,
assets_path=config.data.assets_path,
train_batch_size=config.data.batch_size,
val_batch_size=config.data.val_batch_size,
resolution=config.model.input_size,
holdout=config.data.holdout,
mode=config.data.mode,
primitive=config.data.primitive,
num_workers=config.data.num_workers,
seed=config.seed,
)
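    # The qCLEVR loaders yield (cue, mixture, label) batches; the image resolution is
    # taken from the model's configured input size so data and model stay in sync.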
# Initialize the learning rate scheduler
    if config.scheduler.fn is None:
        scheduler = None
    elif config.scheduler.fn == "one_cycle":
        scheduler = OneCycleLR(
            optimizer,
            max_lr=config.optimizer.lr,
            total_steps=config.train.epochs * len(train_loader),
        )
    else:
        raise NotImplementedError(f"Scheduler {config.scheduler.fn} not implemented")
# Initialize Weights & Biases
if config.wandb:
wandb.init(project="EI RNN", config=config)
wandb_log = lambda x: wandb.log(x)
else:
wandb_log = lambda x: None
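    # wandb_log is a no-op when W&B logging is disabled, so the train/eval loops can
    # call it unconditionally.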
# Create the checkpoint directory
if config.wandb:
checkpoint_dir = os.path.join(config.checkpoint.root, wandb.run.name)
else:
checkpoint_dir = config.checkpoint.root
os.makedirs(checkpoint_dir, exist_ok=True)
for epoch in range(config.train.epochs):
# Train the model
train_loss, train_acc = train_iter(
config,
model,
optimizer,
scheduler,
criterion,
train_loader,
wandb_log,
epoch,
device,
)
# Evaluate the model on the validation set
test_loss, test_acc = eval_iter(
config, model, criterion, val_loader, wandb_log, epoch, device
)
# Print the epoch statistics
print(
f"Epoch [{epoch}/{config.train.epochs}] | "
f"Train Loss: {train_loss:.4f} | "
f"Train Accuracy: {train_acc:.2%} | "
f"Test Loss: {test_loss:.4f}, "
f"Test Accuracy: {test_acc:.2%}"
)
# Save the model
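        # A per-epoch checkpoint is written, and the "checkpoint.pt" symlink is
        # refreshed to point at the most recent epoch.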
file_path = os.path.abspath(
os.path.join(checkpoint_dir, f"checkpoint_{epoch}.pt")
)
link_path = os.path.abspath(os.path.join(checkpoint_dir, "checkpoint.pt"))
checkpoint = {
"epoch": epoch,
"model_state_dict": getattr(model, "_orig_mod", model).state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
}
torch.save(checkpoint, file_path)
try:
os.remove(link_path)
except FileNotFoundError:
pass
        os.symlink(file_path, link_path)


if __name__ == "__main__":
train()
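

# Example invocation (a sketch; the exact override keys depend on config/config.yaml,
# which is not shown here):
#   python train.py seed=0 train.epochs=50 optimizer.fn=adamw optimizer.lr=1e-3 \
#       scheduler.fn=one_cycle data.batch_size=64 wandb=false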