import torch
from tqdm import tqdm

from trainer.build import BaseTrainer, TRAINER_REGISTRY


@TRAINER_REGISTRY.register()
class DebugTrainer(BaseTrainer):
    """Minimal trainer for debugging the data pipeline and the training loop.

    The train/eval/test steps only iterate the data loaders and advance a
    progress bar; `forward` and `backward` are defined but never called from
    within this class.
    """

    def __init__(self, cfg):
        super().__init__(cfg)
        self.best_metric = -1

    def forward(self, data_dict):
        return self.model(data_dict)

    def backward(self, loss):
        self.optimizer.zero_grad()
        self.accelerator.backward(loss)
        # Clip only when gradients are synchronized across processes, i.e.
        # at gradient-accumulation boundaries.
        if self.grad_norm is not None and self.accelerator.sync_gradients:
            self.accelerator.clip_grad_norm_(self.model.parameters(), self.grad_norm)
        self.optimizer.step()
        # The LR scheduler advances once per optimizer step, not per epoch.
        self.scheduler.step()

    def train_step(self, epoch):
        self.model.train()
        loader = self.data_loaders["train"]
        pbar = tqdm(range(len(loader)), disable=(not self.accelerator.is_main_process),
                    desc=f"[Epoch {epoch + 1}/{self.epochs}]")
        for i, data_dict in enumerate(loader):
            with self.accelerator.accumulate(self.model):
                # Attach global-step bookkeeping to the batch dict.
                data_dict['cur_step'] = epoch * len(loader) + i
                data_dict['total_steps'] = self.total_steps
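                # A full trainer would run the model and optimize here, e.g.
                # (sketch only; assumes forward() returns a scalar loss):
                #   loss = self.forward(data_dict)
                #   self.backward(loss)
                # DebugTrainer omits this, so an epoch exercises only the
                # data-loading pipeline.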
            pbar.update(1)

    @torch.no_grad()
    def eval_step(self, epoch):
        self.model.eval()
        loader = self.data_loaders["val"]
        pbar = tqdm(range(len(loader)), disable=(not self.accelerator.is_main_process))
        for i, data_dict in enumerate(loader):
            pbar.update(1)
        # A full trainer would compute metrics here and update
        # self.best_metric; the debug loop only drains the loader.
        return

    @torch.no_grad()
    def test_step(self):
        self.model.eval()
        loader = self.data_loaders["test"]
        pbar = tqdm(range(len(loader)), disable=(not self.accelerator.is_main_process))
        for i, data_dict in enumerate(loader):
            pbar.update(1)
        return

    def run(self):
        if self.mode == "train":
            start_epoch = self.exp_tracker.epoch
            self.global_step = start_epoch * len(self.data_loaders["train"])
            for epoch in range(start_epoch, self.epochs):
                self.exp_tracker.step()
                self.train_step(epoch)

                if self.epochs_per_eval and (epoch + 1) % self.epochs_per_eval == 0:
                    self.eval_step(epoch)
                # Debug run: stop after a single epoch.
                break

        self.test_step()
        if self.mode == "train":
            self.accelerator.end_training()
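

# Usage sketch (hypothetical; `build_trainer` and the `cfg.trainer` field are
# assumptions about the surrounding project, resolved via TRAINER_REGISTRY):
#
#   cfg.trainer = "DebugTrainer"
#   trainer = build_trainer(cfg)
#   trainer.run()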