import time
from re import L

import Dagger
import numpy as np
import torch
import torch.nn as nn
from numpy.core.records import record
from torch._C import device
from torch.autograd import backward
from torchvision import transforms

import model.recorder as recorder
from model import utils
from model.utils import save_model

device = "cuda" if torch.cuda.is_available() else "cpu"


class Trainer():
    """Runs the training loop for a classification model.

    Args:
        configs: configuration object; must expose ``NUM_EPOCHES`` and
            ``L2_DECAY``. It is also forwarded to each epoch as ``args``.
    """
    def __init__(self, configs=None) -> None:
        self.configs = configs

    def train(self,
              model: nn.Module,
              train_dataloader,
              val_dataloader,
              test_dataloader,
              task_name,
              num_epoches=None,
              loss_function=None,
              enable_early_stop=True):
        """Train for several epochs, checkpointing whenever accuracy improves.

        Args:
            model: network to optimize; trained in-place.
            train_dataloader: iterable yielding ``(inputs, labels)`` batches.
            val_dataloader: currently unused (validation is still a TODO).
            test_dataloader: forwarded to the per-epoch routine.
            task_name: checkpoint sub-directory name passed to ``save_model``.
            num_epoches: epoch count; defaults to ``configs.NUM_EPOCHES``.
            loss_function: criterion; defaults to ``CrossEntropyLoss``.
            enable_early_stop: stop once accuracy has plateaued for more than
                6 epochs while already above 0.99.

        Returns:
            np.mean(all_epoch_acces), np.mean(all_epoch_losses)
        """
        num_epoches = self.configs.NUM_EPOCHES if num_epoches is None else num_epoches
        # Build the default criterion per call rather than sharing a single
        # instance created at function-definition time.
        if loss_function is None:
            loss_function = torch.nn.CrossEntropyLoss()

        model.train()
        # FIXME clear BatchNorm layers (project-specific hook on the model)
        model.no_bn()

        # TODO make the optimizer choice and its hyper-parameters configurable
        # NOTE: ideally the optimizer args would be a user-filled OptimizerArgs
        # class whose attributes can be generated dynamically (hasattr etc.).
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=1e-4,
                                     weight_decay=self.configs.L2_DECAY)

        all_epoch_acces = []
        all_epoch_losses = []
        max_acc = -1
        _convergence_counter = 0  # epochs since the last accuracy improvement
        for epoch_idx in range(num_epoches):
            epoch_acc, epoch_avg_loss = self._train_one_epoch(
                model,
                train_dataloader,
                val_dataloader,
                test_dataloader,
                num_epoches,
                loss_function,
                optimizer=optimizer,
                args=self.configs)

            if epoch_acc > max_acc:
                # New best accuracy: checkpoint and reset the plateau counter.
                max_acc = epoch_acc
                save_model(model,
                           epoch_idx,
                           optimizer,
                           max_acc=epoch_acc.item(),
                           min_loss=epoch_avg_loss.item(),
                           subdir=task_name)
                _convergence_counter = 0
            else:
                _convergence_counter += 1

            all_epoch_acces.append(epoch_acc)
            all_epoch_losses.append(epoch_avg_loss)
            # TODO run validation with val_dataloader
            print(
                "\t epoch : {0}, batch_avg_loss : {1:.2f}, avg_acc : {2:.3f}".
                format(epoch_idx + 1, epoch_avg_loss.item(), epoch_acc.item()))

            # Early stop: accuracy already above 0.99 and no improvement for
            # more than 6 consecutive epochs.
            # NOTE(review): breaking here skips the recorder call below for
            # the final epoch — preserved from the original behavior.
            if enable_early_stop and _convergence_counter > 6 and epoch_acc.item(
            ) > 0.99:
                break

            # summary
            recorder.record_whole_model(model,
                                        loss=epoch_avg_loss,
                                        acc=epoch_acc)

        return np.mean(all_epoch_acces), np.mean(all_epoch_losses)

    def _train_one_epoch(self,
                         model: nn.Module,
                         train_data_loader,
                         val_data_loader,
                         test_data_loader,
                         num_epoch,
                         loss_function,
                         optimizer,
                         args,
                         enable_test=False,
                         enable_output=False):
        """Run one optimization pass over ``train_data_loader``.

        Args:
            model: network to optimize.
            train_data_loader: iterable yielding ``(inputs, labels)`` batches.
            val_data_loader: unused here.
            test_data_loader: unused here.
            num_epoch: total epoch count (informational; unused here).
            loss_function: criterion applied to ``(outputs, labels)``.
            optimizer: optimizer stepped once per batch.
            args: configuration object (unused here).
            enable_test: if True, save one prediction/label image per batch
                under ``./dagger_outputs/``.
            enable_output: if True, print decoded output/label actions using
                the ``Dagger`` lookup tables.

        Returns:
            (mean batch accuracy, mean batch loss) as numpy floats.
        """
        batch_acces = []
        batch_losses = []
        for batch_idx, example in enumerate(train_data_loader):
            optimizer.zero_grad()
            inputs, labels = example

            # Dump the first image of the batch for a manual sanity check.
            # NOTE(review): this writes a JPEG on every batch, which is
            # costly — consider guarding it behind a debug flag.
            _img_train_tensor = inputs[0]
            _img_for_train = tensor_to_PIL(_img_train_tensor)
            _img_for_train.save("./check_train.jpg")

            # forward + backward + optimize
            # NOTE(review): labels are moved to `device` but inputs are not —
            # confirm the model handles input device placement itself.
            outputs = model(inputs)
            labels = labels.to(device)

            loss = loss_function(outputs, labels)
            loss.backward()
            optimizer.step()
            output_argmaxed = outputs.argmax(dim=1)

            # Optionally save a labelled sample image for inspection.
            # NOTE(review): uses index 1, so this breaks on batches of size 1.
            if enable_test:
                test_idx = 1
                test_img_tensor = inputs[test_idx]
                test_img = tensor_to_PIL(test_img_tensor)
                utils.path_check("./dagger_outputs/")
                test_img.save(
                    './dagger_outputs/output_{0}_label_{1}.jpg'.format(
                        output_argmaxed[test_idx].item(),
                        labels[test_idx].item()))

            correct = (output_argmaxed == labels).float().sum()
            acc = correct / inputs.size()[0]
            batch_acces.append(acc.item())
            batch_losses.append(loss.item())

            if enable_output:
                # Decode predictions and labels to human-readable actions.
                output_classes = [
                    int(Dagger.IDX_TO_CLS_DICT.get(_.item()))
                    for _ in output_argmaxed
                ]
                output_actions = [
                    Dagger.ACTION_TO_CLS_DICT.get(_) for _ in output_classes
                ]
                labels_classes = [
                    int(Dagger.IDX_TO_CLS_DICT.get(_.item())) for _ in labels
                ]
                labels_actions = [
                    Dagger.ACTION_TO_CLS_DICT.get(_) for _ in labels_classes
                ]

                print("output", output_actions)
                print("label", labels_actions)

            print("iter : {0}, loss : {1:.2f}, acc : {2:.2f}".format(
                batch_idx, loss.item(), acc.item()))

        return np.mean(batch_acces), np.mean(batch_losses)


def tensor_to_PIL(tensor):
    """Convert an image tensor to a PIL image.

    The tensor is copied to the CPU, a leading singleton (batch) dimension
    is dropped if present, and the result is converted to float before the
    PIL conversion.
    """
    cpu_copy = tensor.cpu().clone().squeeze(0)
    to_pil = transforms.ToPILImage()
    return to_pil(cpu_copy.float())