"""
containers for model training and testing
"""
# author: Zichuan Wang
# E-mail: zichuan.wang@qq.com

import torch
from torch import nn
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import CosineAnnealingLR, LambdaLR
from torch.utils.tensorboard import SummaryWriter

from path import *
from config import cfg, parser_info
from model import get_models, unfreeze_model
from confusion_matrix import confusion_matrix
from warm_scheduler import get_scheduler
from timer import Timer, TimeAccumulator
from logger import logger
from losses import mse_loss as criterion_mse, FocalLoss

import pandas as pd
import numpy as np
from typing import List
from functools import reduce


class SingleModelContainer:
    """
    basic container that contains data and statistics of a single model
    easy to be extended to model ensemble containers (e.g. `MultiModelContainer`)

    This container is mainly for single model training, validation, and testing.

    For model ensemble, see `MultiModelContainer`.
    """
    # per-class instance counter; subclasses shadow this attribute so each
    # container family (train / val / test) numbers its nets independently
    net_num = 0

    def __init__(self, model_name: str = None, split: str = None, model: nn.Module = None,
                 dataset: Dataset = None, dataloader: DataLoader = None,
                 criteria: list = None, batch_summ_itr: int = 1, epoch_summ_itr: int = 1,
                 **kwargs):
        """
        split: 'train' | 'val' | 'test'
        batch_summ_itr: log every N batches; 0 - silence
        epoch_summ_itr: log every N epochs; 0 - silence
        kwargs: 'writer' - an optional shared `SummaryWriter`; when absent, the
                writer created on LOG_PATH in this constructor is used

        self.save_path: path to save model
            name format: MODEL_FILE_PATH`i``j`
                         i: `net_num`, 1, 2, 3, ...
                         j: 1 - val, ensemble best
                            2 - val, separate best
                            3 - test, ensemble best
                            4 - test, separate best
            in `SingleModelContainer`, j = 2 or 4 is available
            in `MultiModelContainer`, j = 1 or 3 is available

        pass_data_epoch(): should be used in the 'for' loop
        calculate_metric(): override to define custom metrics
            define new metrics in 'self.__init__()',
            also check other functions to update values
        calculate_loss(): override to define custom losses
            define new losses in 'self.__init__()',
            also check other functions to update values
        """
        assert split in ['train', 'val', 'test']
        self.model_name = model_name
        self.split = split
        self.model = model
        self.dataset = dataset
        self.dataloader = dataloader
        self.criteria = criteria
        self.timer = Timer(timer_name=self.model_name + f' ({self.split})')
        self.accumulator = TimeAccumulator(accumulator_name=self.model_name + f' ({self.split})')
        self.batch_summ_itr = batch_summ_itr
        self.epoch_summ_itr = epoch_summ_itr
        self.save_path = self.get_save_path()
        # default tensorboard writer; `get_kwargs` may replace it with a
        # caller-supplied one (previously this default was always discarded)
        self.writer = SummaryWriter(LOG_PATH)

        # kwargs
        self.writer: SummaryWriter
        self.get_kwargs(kwargs)

        # global stat
        self.n_dataloader = len(self.dataloader)
        self.n_dataset = len(self.dataset)
        self.total_time = 0  # sec
        self.n_correct_best = 0      # best epoch correct-count seen so far
        self.epoch_correct_best = 0  # epoch index at which the best was reached

        # epoch data
        self.n_epoch = 0
        # confusion matrix; the extra row/column collects out-of-range labels
        self.cm_epoch = torch.zeros((cfg.class_num + 1, cfg.class_num + 1)).int().cuda()
        self.n_correct_epoch = 0
        self.losses_epoch = [0 for _ in range(len(self.criteria))]
        self.time_epoch = 0
        self.wrong_data_id = []  # path to wrongly classified data

        # batch data
        self.n_batch = 0
        self.losses_batch = torch.zeros(len(self.criteria)).cuda()
        self.time_batch = 0

    def get_kwargs(self, kwargs):
        """Pick up optional keyword arguments; keep the default writer if none given."""
        self.writer = kwargs.get('writer', self.writer)

    def get_save_path(self):
        """No checkpointing by default; subclasses return a constant *name* (see __init__)."""
        return ''

    def pass_data_epoch(self):
        """Run one full pass over the dataloader, updating metrics, losses and logs."""
        # initialize per-epoch counters
        self.prepare_epoch_stat()

        for sample in self.dataloader:
            # sample layout: [inputs, labels, paths] -- inferred from usage below
            sample[0] = sample[0].cuda()
            sample[1] = sample[1].cuda()

            # batch data
            self.pass_data_batch(sample)

            # summarize batch stat
            self.log_batch()

        # visualize log
        self.write_log()

        # record best
        self.update_best()

        # summarize epoch stat
        self.log_epoch()

    def update_best(self):
        """Checkpoint the model whenever this epoch beats the running best accuracy."""
        if self.n_correct_epoch > self.n_correct_best:
            self.n_correct_best = self.n_correct_epoch
            self.epoch_correct_best = self.n_epoch
            self.save_model()

    def save_model(self, save_path: str = ''):
        """
        Do not save if no path is available.
        Priority: save_path > self.save_path

        Paths are stored as the *name* of a constant exported by `path`
        (e.g. 'MODEL_FILE_PATH12'); resolve it through this module's globals
        instead of `eval()` -- same lookup for plain names, no code execution.
        """
        name = save_path or self.save_path
        if name:
            torch.save(self.model.state_dict(), globals()[name])

    def prepare_epoch_stat(self):
        """Reset all per-epoch accumulators and bump the epoch counter."""
        self.set_model_mode()
        self.add_epoch()

        self.n_batch = 0

        self.time_epoch = 0
        self.n_correct_epoch = 0
        self.cm_epoch.zero_()
        self.losses_epoch = [0 for _ in self.losses_epoch]
        self.wrong_data_id = []

    def set_model_mode(self):
        """train() for the training split, eval() otherwise."""
        if self.split == 'train':
            self.model.train()
        else:
            self.model.eval()

    def clock_batch_begin(self):
        self.timer.clock(f'epoch {self.n_epoch} begins', verbose=False)

    def clock_batch_end(self):
        # accumulate the forward-pass time into epoch and global totals
        self.time_batch = self.timer.clock(f'epoch {self.n_epoch} ends', verbose=False)
        self.time_epoch += self.time_batch
        self.total_time += self.time_batch

    def calculate_metric(self, prediction, sample):
        """Update the confusion matrix; derive accuracy on the last batch of the epoch."""
        # confusion matrix
        labels, paths = sample[1], sample[2]
        confusion_matrix(labels, prediction, self.cm_epoch,
                         img_name=paths, wrong_imgs=self.wrong_data_id,
                         )

        # accuracy: only meaningful once the whole epoch has been seen;
        # the last diagonal entry (out-of-range bucket) is excluded
        if self.n_batch == self.n_dataloader:
            self.n_correct_epoch = self.cm_epoch.diagonal()[:-1].sum().item()

    def calculate_loss(self, prediction, sample):
        """Evaluate every criterion on this batch and accumulate into the epoch totals."""
        labels = sample[1]

        self.losses_batch = torch.zeros(len(self.criteria)).cuda()
        for j, criterion in enumerate(self.criteria):
            self.losses_batch[j] = criterion(prediction, labels)
            self.losses_epoch[j] += self.losses_batch[j].item()

    def pass_data_batch(self, sample):
        """Forward one batch: predict (timed), then update metrics and losses."""
        self.add_batch()

        # prediction & timing
        self.clock_batch_begin()
        prediction = self.get_pred(sample)
        self.clock_batch_end()

        # metric
        self.calculate_metric(prediction, sample)

        # loss
        self.calculate_loss(prediction, sample)

        return prediction

    def add_epoch(self):
        self.n_epoch += 1

    def add_batch(self):
        self.n_batch += 1

    def get_pred(self, sample):
        # NOTE(review): the model receives both inputs and labels -- confirm
        # the model's forward signature actually expects the labels
        return self.model(sample[0], sample[1])

    def log_batch(self, verbose=True):
        """Build (and optionally log) the per-batch summary line; returns it, or None when skipped."""
        due = self.batch_summ_itr > 0 and (self.n_batch % self.batch_summ_itr == 0 or self.n_batch == 1)
        if not due:
            return

        batch_loss_info = '[batch loss: ' + \
                          ' | '.join([f'{loss:.4f}' for loss in self.losses_batch]) + \
                          ']'

        info = f'[status: {self.split}]' \
               f'[net: {self.model_name}]' \
               \
               f'[epoch: {cfg.prefix}{self.n_epoch}/{cfg.EPOCHS}{cfg.suffix}]' \
               f'[batch: {self.n_batch}/{self.n_dataloader}]' \
               \
               f'[batch time: {self.time_batch:.2f}]' + \
               batch_loss_info

        if verbose:
            logger.logger.info(info)

        return info

    def log_epoch(self, verbose=True):
        """Build (and optionally log) the per-epoch summary line; returns it, or None when skipped."""
        due = self.epoch_summ_itr > 0 and (self.n_epoch % self.epoch_summ_itr == 0 or self.n_epoch == 1)
        if not due:
            return

        epoch_loss_info = '[epoch loss: ' + \
                          ' | '.join([f'{loss / self.n_dataloader:.4f}' for loss in self.losses_epoch]) + \
                          ']'
        info = f'[status: {self.split}]' \
               f'[net: {self.model_name}]' \
               \
               f'[epoch: {cfg.prefix}{self.n_epoch}/{cfg.EPOCHS}{cfg.suffix}]' \
               \
               f'[epoch time: {self.time_epoch:.2f}]' + \
               epoch_loss_info + \
               f'[epoch acc: {self.n_correct_epoch / self.n_dataset * 100:.4f}%]' \
               \
               f'[best acc: {self.n_correct_best / self.n_dataset * 100:.4f}% ({self.epoch_correct_best} epoch)]'

        if verbose:
            logger.logger.info(info)
        logger.logger.info(self.cm_epoch)
        logger.logger.info(self.wrong_data_id[:cfg.num_wrong_imgs])

        return info

    def write_log(self):
        """Push per-epoch losses and accuracy to tensorboard (no-op without a writer)."""
        if self.writer is None:
            return

        # loss (averaged over batches)
        for i, loss in enumerate(self.losses_epoch):
            self.writer.add_scalars(f'loss_{i + 1}_{self.model_name}',
                                    {self.split: loss / self.n_dataloader},
                                    self.n_epoch)

        # metric
        self.writer.add_scalars(f'acc_{self.model_name}',
                                {self.split: self.n_correct_epoch / self.n_dataset * 100},
                                self.n_epoch)


class SingleTrainContainer(SingleModelContainer):
    """
    single model training container
    mainly add learning rate property `self.lr` and perform back propagation
    """
    def __init__(self, model_name: str = '', model: nn.Module = None,
                 dataset: Dataset = None, dataloader: DataLoader = None,
                 criteria: list = None,
                 batch_summ_itr: int = cfg.train_info_interval, epoch_summ_itr: int = 1,
                 split: str = 'train',
                 **kwargs):
        SingleTrainContainer.net_num += 1
        super().__init__(model_name, split, model, dataset, dataloader,
                         criteria, batch_summ_itr, epoch_summ_itr,
                         **kwargs)

        self.optimizer = self.get_optimizer()
        self.scheduler = self.get_scheduler()

        # epoch data: learning rate reported in the epoch summary
        self.lr = 0

    def get_optimizer(self):
        """Adam over trainable parameters only, configured from `cfg`."""
        optimizer = torch.optim.Adam(params=filter(lambda p: p.requires_grad, self.model.parameters()),
                                     lr=cfg.base_lr, betas=(cfg.beta1, cfg.beta2),
                                     eps=cfg.eps, weight_decay=cfg.weight_decay)
        return optimizer

    def get_scheduler(self):
        """
        Build a warm-up + cosine LambdaLR schedule.

        Per-net hyper-parameters are stored as numbered attributes on `cfg`
        (cfg.lr1, cfg.eta_min1, ...); resolve them with `getattr` instead of
        the previous `eval()` on a generated string.
        """
        idx = SingleTrainContainer.net_num
        lr_lambda = get_scheduler(warm_epoch=cfg.warm_up,
                                  max_lr=getattr(cfg, f'lr{idx}'),
                                  T_0=cfg.T_0,
                                  T_mult=cfg.T_mult,
                                  eta_min=getattr(cfg, f'eta_min{idx}'))
        return LambdaLR(self.optimizer, lr_lambda)

    def pass_data_epoch(self):
        super().pass_data_epoch()
        # advance the LR schedule once per epoch
        self.scheduler.step()

    def prepare_epoch_stat(self):
        super().prepare_epoch_stat()
        self.lr = self.scheduler.get_lr()[0]

    def pass_data_batch(self, sample):
        prediction = super().pass_data_batch(sample)
        self.backprop()

        return prediction

    def backprop(self):
        """One optimizer step on the sum of all batch losses."""
        self.optimizer.zero_grad()
        losses = self.losses_batch.sum()
        losses.backward()
        self.optimizer.step()

    def log_epoch(self, verbose=True):
        """Prepend the learning rate to the base epoch summary."""
        info = super().log_epoch(verbose=False)
        # the base class returns None when this epoch is not due for
        # summarizing; previously that crashed the string concatenation below
        if info is None:
            return None
        info = f'[lr: {self.lr:.6f}]' + info
        if verbose:
            logger.logger.info(info)

        return info


class SingleValContainer(SingleModelContainer):
    """
    Single-model validation container.

    Mirror image of `SingleTestContainer`: runs each epoch without gradients
    and checkpoints the per-net "separate best" weights.
    """
    net_num = 0

    def __init__(self, model_name: str = '', model: nn.Module = None,
                 dataset: Dataset = None, dataloader: DataLoader = None,
                 criteria: list = None,
                 batch_summ_itr: int = 0, epoch_summ_itr: int = 1, split: str = 'val',
                 **kwargs):
        SingleValContainer.net_num += 1
        super().__init__(model_name, split, model, dataset, dataloader,
                         criteria, batch_summ_itr, epoch_summ_itr, **kwargs)

    def get_save_path(self):
        # suffix 2 = "val, separate best" (see SingleModelContainer.__init__)
        return f'MODEL_FILE_PATH{SingleValContainer.net_num}2'

    def pass_data_epoch(self):
        # validation never needs gradients
        with torch.no_grad():
            super().pass_data_epoch()


class SingleTestContainer(SingleValContainer):
    """
    Single-model test container.

    Same behaviour as `SingleValContainer`, only the split and the checkpoint
    suffix differ.

    NOTE(review): `super().__init__` also increments
    `SingleValContainer.net_num`, so mixing val and test containers inflates
    the val counter -- confirm this numbering is intended.
    """
    net_num = 0

    def __init__(self, model_name: str = '', model: nn.Module = None,
                 dataset: Dataset = None, dataloader: DataLoader = None,
                 criteria: list = None,
                 batch_summ_itr: int = 0, epoch_summ_itr: int = 1,
                 **kwargs):
        SingleTestContainer.net_num += 1
        super().__init__(model_name, model, dataset, dataloader,
                         criteria, batch_summ_itr, epoch_summ_itr, split='test',
                         **kwargs)

    def get_save_path(self):
        # suffix 4 = "test, separate best" (see SingleModelContainer.__init__)
        return f'MODEL_FILE_PATH{SingleTestContainer.net_num}4'


class MultiModelContainer(SingleModelContainer):
    """
    This container is mainly for model ensemble.
    All models use the same training data in each batch,
    so this is mainly for validation and test phase.

    Use this container for validation and test, or for training with the same data.

    For single model training and testing, see `SingleModelContainer`
    """
    def __init__(self, containers: List[SingleModelContainer] = None,
                 dataset: Dataset = None, dataloader: DataLoader = None,
                 criteria: list = None, batch_summ_itr: int = 1, epoch_summ_itr: int = 1,
                 **kwargs):
        """
        containers: a list of containers with the same `container.split`

        pass_data_epoch(): should be used in the 'for' loop
        get_pred(): sums the outputs of all single models to produce the final prediction
        calculate_loss(): sum of single model losses
        """
        self.containers = containers
        split = self.assert_split_is_same()
        model_name = self.get_model_name()
        super().__init__(model_name=model_name, split=split,
                         dataset=dataset, dataloader=dataloader, criteria=criteria,
                         batch_summ_itr=batch_summ_itr, epoch_summ_itr=epoch_summ_itr,
                         **kwargs)
        # ensemble losses are plain floats summed from member tensors,
        # one slot per criterion
        self.losses_batch = [0 for _ in range(len(self.criteria))]

    def get_save_path(self):
        """Placeholder (no saving); overridden by MultiVal/MultiTestContainer."""
        return ['' for _ in range(len(self.containers))]

    def save_model(self, save_path: str = ''):
        """
        Save every member model to its own path.
        `save_path` is unused here; kept only for interface compatibility.
        """
        for container, member_path in zip(self.containers, self.save_path):
            container.save_model(member_path)

    def assert_split_is_same(self):
        """All member containers must share one split; return it."""
        splits = [container.split for container in self.containers]
        splits = set(splits)
        assert len(splits) == 1
        split = list(splits)[0]

        return split

    def get_model_name(self):
        """Ensemble name, e.g. 'resnest50+efficientnetb4'."""
        names = [container.model_name for container in self.containers]

        return '+'.join(names)

    def prepare_epoch_stat(self):
        super().prepare_epoch_stat()
        for container in self.containers:
            container.prepare_epoch_stat()

    def set_model_mode(self):
        # no model of its own; each member sets its mode in its own
        # prepare_epoch_stat()
        return

    def calculate_loss(self, prediction, sample):
        """Ensemble loss = per-criterion sum of the member batch losses."""
        # size by the number of criteria, not the number of containers --
        # the loop below indexes by criterion
        self.losses_batch = [0 for _ in range(len(self.criteria))]

        for container in self.containers:
            for j in range(len(self.criteria)):
                loss = container.losses_batch[j].item()
                self.losses_batch[j] += loss
                self.losses_epoch[j] += loss

    @staticmethod
    def add(x, y):
        return x + y

    def get_pred(self, sample):
        """Run every member on the same batch and sum their predictions."""
        predictions = []
        for container in self.containers:
            predictions.append(container.pass_data_batch(sample))

        return reduce(self.add, predictions)

    def update_best(self):
        super().update_best()
        for container in self.containers:
            container.update_best()

    def log_batch(self, verbose=True):
        # propagate `verbose` (it was previously ignored)
        super().log_batch(verbose)
        for container in self.containers:
            container.log_batch(verbose)

    def log_epoch(self, verbose=True):
        # propagate `verbose` (it was previously ignored)
        super().log_epoch(verbose)
        for container in self.containers:
            container.log_epoch(verbose)

    def write_log(self):
        super().write_log()
        for container in self.containers:
            container.write_log()


class MultiTrainContainer(MultiModelContainer):
    """
    Multi-model training container.
    Feeds every member model the same batch of data.
    """
    def __init__(self, containers: List[SingleTrainContainer] = None,
                 dataset: Dataset = None, dataloader: DataLoader = None,
                 criteria: list = None,
                 batch_summ_itr: int = cfg.train_info_interval, epoch_summ_itr: int = 1,
                 **kwargs):
        super().__init__(containers,
                         dataset=dataset, dataloader=dataloader, criteria=criteria,
                         batch_summ_itr=batch_summ_itr, epoch_summ_itr=epoch_summ_itr,
                         **kwargs)


class MultiValContainer(MultiModelContainer):
    """
    Multi-model validation container.
    Counterpart of `MultiTestContainer` for the validation split.
    """
    def __init__(self, containers: List[SingleValContainer] = None,
                 dataset: Dataset = None, dataloader: DataLoader = None,
                 criteria: list = None, batch_summ_itr: int = 0, epoch_summ_itr: int = 1,
                 **kwargs):
        super().__init__(containers, dataset, dataloader, criteria,
                         batch_summ_itr, epoch_summ_itr, **kwargs)

    def get_save_path(self):
        # suffix 1 = "val, ensemble best"; one path name per member net
        return [f'MODEL_FILE_PATH{idx}1' for idx in range(1, len(self.containers) + 1)]

    def pass_data_epoch(self):
        # no gradients during validation
        with torch.no_grad():
            super().pass_data_epoch()


class MultiTestContainer(MultiValContainer):
    """
    Multi-model test container.
    Same behaviour as `MultiValContainer`, only the checkpoint suffix differs.
    """
    def __init__(self, containers: List[SingleTestContainer] = None,
                 dataset: Dataset = None, dataloader: DataLoader = None,
                 criteria: list = None, batch_summ_itr: int = 0, epoch_summ_itr: int = 1,
                 **kwargs):
        super().__init__(containers, dataset, dataloader, criteria,
                         batch_summ_itr, epoch_summ_itr, **kwargs)
        # NOTE(review): duplicates `self.save_path` set by the base
        # constructor; kept for backward compatibility with existing callers
        self.save_paths = self.get_save_path()

    def get_save_path(self):
        # suffix 3 = "test, ensemble best"; one path name per member net
        return [f'MODEL_FILE_PATH{idx}3' for idx in range(1, len(self.containers) + 1)]


if __name__ == '__main__':
    # smoke test: run one validation epoch on the test split and report wall time
    import time

    from dataset import BaldClassificationDataset

    dataset_val = BaldClassificationDataset(split='test')
    dataloader_val = DataLoader(dataset_val, batch_size=cfg.BATCH)
    criterion_ce = FocalLoss() if cfg.use_focal else nn.BCELoss()
    writer = SummaryWriter(LOG_PATH)

    if cfg.multi_model:
        # ensemble: one SingleValContainer per net, wrapped in a MultiValContainer
        net1, net2 = get_models()
        val1 = SingleValContainer('resnest50', net1, dataset_val, dataloader_val, [criterion_ce], writer=writer)
        val2 = SingleValContainer('efficientnetb4', net2, dataset_val, dataloader_val, [criterion_ce], writer=writer)
        container = MultiValContainer([val1, val2], dataset_val, dataloader_val, [criterion_ce], writer=writer)
    else:
        # single model (previously this path crashed: `net2` was referenced
        # unconditionally even when get_models() returned a single net)
        net1 = get_models()
        container = SingleValContainer('resnest50', net1, dataset_val, dataloader_val, [criterion_ce], writer=writer)

    start = time.time()
    container.pass_data_epoch()
    print(time.time() - start)
