import torch
from common.utils import get_device
from trainer_manger.validator.validator_manger import ValidatorManger


class TrainerBase(object):
    def __init__(self,
                 model,
                 optimizer,
                 criterion,
                 train_loader,
                 test_loader,
                 epoch,
                 device=None,
                 loss_fn=None,
                 metric_fn=None,
                 log_interval=100):
        """
        Training manager: drives the epoch loop, per-batch optimization, and
        post-epoch validation via ValidatorManger.

        :param model: torch.nn.Module to train.
        :param optimizer: torch optimizer updating ``model``'s parameters.
        :param criterion: loss callable invoked as ``criterion(output, target)``.
        :param train_loader: iterable of ``(data, target)`` training batches;
            must expose a ``dataset`` attribute with ``len()`` (used for logging).
        :param test_loader: validation batches, handed to ValidatorManger.
        :param epoch: total number of epochs to run.
        :param device: ``None`` (auto-detect via ``get_device()``), a device
            string such as ``"cuda"``, or an existing ``torch.device``.
        :param loss_fn: optional loss hook; stored but unused in this class —
            presumably consumed by subclasses (TODO confirm).
        :param metric_fn: optional metric hook; stored but unused in this class.
        :param log_interval: print training progress every this many batches
            (default 100, matching the previous hard-coded value).
        """
        self._model = model
        self._optimizer = optimizer
        self._criterion = criterion
        self._train_loader = train_loader
        self._test_loader = test_loader
        self.device = self._get_device(device)
        self._loss_fn = loss_fn
        self._metric_fn = metric_fn
        self._epoch = epoch
        self._log_interval = log_interval

        # Validation is delegated; it reuses the resolved device and criterion.
        self._validator_manger = ValidatorManger(self._model, self._test_loader, self.device, self._criterion)

    @staticmethod
    def _get_device(device):
        """Normalize ``device`` to a ``torch.device``.

        ``None`` -> auto-detected device name from ``get_device()``; a ``str``
        is wrapped; anything else is assumed to already be a ``torch.device``
        and returned unchanged.
        """
        if device is None:
            return torch.device(get_device())
        if isinstance(device, str):
            return torch.device(device)
        return device

    def train(self, text=False):
        """Run the full training loop and validate after every epoch.

        :param text: when True, use the text/RNN path (``_batch_train_1``,
            where the model returns ``(output, state)``); otherwise use the
            plain single-output path. The flag is forwarded to the validator.
        """
        self._model.to(self.device)
        for epoch in range(self._epoch):
            epoch_index = epoch + 1  # 1-based for human-readable logs
            if text:
                self._batch_train_1(epoch_index)
            else:
                self._batch_train(epoch_index)
            self._validator_manger.validate(text)

    def _batch_train(self, epoch_index):
        """One training epoch for models that return a single output tensor."""
        self._model.train()
        for batch_idx, (data, target) in enumerate(self._train_loader):
            data, target = data.to(self.device), target.to(self.device)
            self._optimizer.zero_grad()
            output = self._model(data)
            loss = self._criterion(output, target)
            loss.backward()
            self._optimizer.step()
            if batch_idx % self._log_interval == 0:
                print(
                    f"Train Epoch: {epoch_index} [{batch_idx * len(data)}/{len(self._train_loader.dataset)}] Loss: {loss.item()}")

    def _batch_train_1(self, epoch_index):
        """One training epoch for models that return ``(output, state)``
        (e.g. RNNs) whose criterion may yield an unreduced per-element loss."""
        self._model.train()
        for batch_idx, (data, target) in enumerate(self._train_loader):
            data, target = data.to(self.device), target.to(self.device)
            self._optimizer.zero_grad()
            output, state = self._model(data)
            # Squeeze both sides so shapes line up for an element-wise loss.
            loss = self._criterion(output.squeeze(), target.squeeze())
            # The loss may be an unreduced tensor; sum it before backward.
            loss.sum().backward()
            ave_loss = loss.sum() / loss.numel()
            self._optimizer.step()
            if batch_idx % self._log_interval == 0:
                # .item() so the log shows a plain number, consistent with
                # _batch_train, instead of a tensor repr like "tensor(0.12)".
                print(
                    f"Train Epoch: {epoch_index} [{batch_idx * len(data)}/{len(self._train_loader.dataset)}] Loss: {ave_loss.item()}")
