from torch.cuda.amp import GradScaler
from torch.utils.data import DataLoader

from .worker import Worker


class Trainer(Worker):
    """Worker that runs one training epoch per work cycle.

    Drives the optimization loop for ``runner.model``: forward pass,
    AMP-scaled backward pass, optimizer step, and per-batch logging.
    """

    def __init__(self, dataloader: DataLoader) -> None:
        super().__init__(dataloader)

    def process_batch(self, batch_data) -> None:
        """Run one optimization step on a single batch.

        Args:
            batch_data: a 3-tuple ``(inputs, targets, labels)``; ``labels``
                is not consumed by the training step as written.
        """
        self.runner.batch_current += 1
        inputs, targets, _labels = batch_data  # labels unused here
        # Call the module itself rather than .forward() so that any
        # registered forward hooks fire; identical result when no hooks exist.
        _outputs, loss_dict = self.runner.model(
            inputs, targets, use_amp=self.runner.enable_amp
        )
        # Scaled backward + step; GradScaler is a transparent pass-through
        # when constructed with enabled=False.
        self.scaler.scale(loss_dict["total_loss"]).backward()
        self.scaler.step(self.runner.model.optimizer)
        self.scaler.update()
        self.runner.model.optimizer.zero_grad()
        for sub_logger in self.runner.sub_loggers:
            sub_logger.log_train(loss_dict, self.runner.batch_current)

    def set_runner(self, runner) -> None:
        """Attach the runner and build the AMP gradient scaler from its config.

        NOTE(review): the scaler is enabled from ``runner.cfg.train.use_amp``
        while the forward pass reads ``runner.enable_amp`` — confirm these two
        flags always agree.
        """
        super().set_runner(runner)
        self.scaler = GradScaler(enabled=self.runner.cfg.train.use_amp)

    def before_work_hook(self) -> None:
        """Switch the model to train mode and advance the epoch counter."""
        self.runner.model.train()
        self.runner.epoch_current += 1

    def after_work_hook(self) -> None:
        """Step the LR scheduler once at the end of each epoch."""
        self.runner.model.scheduler.step()

    def get_desc(self) -> str:
        """Return the progress-bar description for the current epoch."""
        return self.DESC_TEMPLATE.format(job_name="Train", step=self.runner.epoch_current)
