import os
import time
from tqdm import tqdm
from pathlib import Path

import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
import torchvision.transforms as transforms

from dataset import *
from utils import *

# Module-level logger (project `Logger` from utils) writing to ./log/log_model.txt.
# NOTE(review): assumes the ./log directory exists or is created by Logger — confirm.
log = Logger(os.path.join('.', 'log', 'log_model.txt'))


class BYOLTrainer:
    """Implementation of the training process of BYOL architecture.

    Trains an online network against a momentum-updated (EMA) target
    network as in BYOL, while simultaneously optimizing a supervised
    classification head; also supports exporting average-pooled view
    features of 3D models for later retrieval.

    Attributes:
        online: Backbone model with projection head and prediction head.
        target: Backbone model with projection head.
        predictor: Prediction mlp head.
        optimizer: Optimizer.
        scheduler: Scheduler.
        device: 'cuda' or 'cpu'.
        params: Input parameters.
        classes: Total classes.

    """
    def __init__(self,
                 online: nn.Module,
                 target: nn.Module,
                 predictor: nn.Module,
                 optimizer: torch.optim.Optimizer,
                 scheduler: torch.optim.lr_scheduler._LRScheduler,
                 device: str,
                 params,
                 classes: int = 48) -> None:
        # Log the full parameter namespace so each run's config is recorded.
        log.logger.info(params)
        self.online = online
        self.target = target
        self.predictor = predictor
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.device = device
        self.epochs = params.epochs
        # EMA coefficient for the target-network update; see
        # update_target_network_parameters().
        self.m = params.momentum_update
        self.classes = classes
        self.batch_size = params.batch_size
        # Whether store_model() actually writes checkpoints.
        self.save_model = params.save_model
        # TensorBoard event files are written under ./events_model.
        self.tb = SummaryWriter(os.path.join('.', 'events_model'))

    def initialize_target_network(self):
        """Initialize target network with the parameters of online network.

        No gradient information is retained.

        """
        # Copy parameters pairwise (assumes online and target share the
        # same parameter ordering) and freeze the target network: it is
        # only ever updated via the EMA rule, never by the optimizer.
        for param_q, param_k in zip(self.online.parameters(), self.target.parameters()):
            param_k.data.copy_(param_q.data)
            param_k.requires_grad = False

    @torch.no_grad()
    def update_target_network_parameters(self):
        """Update target network parameters with momentum."""
        # EMA update: target = m * target + (1 - m) * online.
        for param_q, param_k in zip(self.online.parameters(), self.target.parameters()):
            param_k.data = param_k.data * self.m + param_q.data * (1 - self.m)

    def manage_batch_data(self, x: torch.Tensor, label: torch.Tensor):
        """Manage batch data for learning.
        
        Args:
            x: Batch data of shape b x 12 x 1 x W x H.
            label: Batch label of shape b x 1.
        
        Returns:
            x0: The first six views in each group of the 12 views, tensor shape of b * 6, 1, W, H.
            x1: The last six views in each group of the 12 views, tensor shape of b * 6, 1, W, H.
            label: The expanded label corresponds to x0 and x1, tensor shape of b * 6.

        """
        # Repeat each model's label 6 times so it matches the 6 views kept
        # in each of x0 / x1 below.
        label = label.unsqueeze(1).expand(label.shape[0], 6).reshape(-1)
        c, h, w = x.size()[-3:]
        # Flatten the view dimension: (b, 12, c, h, w) -> (b * 12, c, h, w).
        x = x.view(-1, c, h, w)
        batch = int(x.size(0))
        # Boolean masks over the flattened batch: positions 0-5 of every
        # group of 12 go to x0, positions 6-11 go to x1.
        index = (torch.arange(batch) % 12 < 6)
        x0 = x[index]
        index = (torch.arange(batch) % 12 >= 6)
        x1 = x[index]
        x0 = x0.to(self.device)
        x1 = x1.to(self.device)
        label = label.to(self.device)
        return x0, x1, label

    def train(self, batch_size: int = 128, num_workers: int = 0):
        """Train the BYOL based model.
        
        Args:
            batch_size: Batch size.
            num_workers: How many subprocesses to use for data loading. 0 means that the data will be loaded in the main process.
        
        """
        # Augmentations for BYOL: either a random crop or a random rotation
        # (white fill), plus random flips. Normalization constants are the
        # dataset mean/std for shrec14-shapes (grayscale, single channel).
        transform_train = transforms.Compose([
            transforms.RandomChoice([transforms.RandomCrop(224),
                                     transforms.RandomRotation(degrees=(0, 180), fill=255)]),
            transforms.RandomHorizontalFlip(p=0.5),
            transforms.RandomVerticalFlip(p=0.5),
            transforms.ToTensor(),
            # shrec14-shapes
            transforms.Normalize(mean=0.9731981754302979, std=0.11050379276275635),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            # shrec14-shapes
            transforms.Normalize(mean=0.9731981754302979, std=0.11050379276275635),
        ])
        log.logger.info("----- Start loading data -----")
        cad_train_data = ModelViewDataset(transform=transform_train, split='train')
        # cad_test_data = ModelViewDataset(transform=transform_test, split='test')
        cad_train_loader = DataLoader(dataset=cad_train_data,
                                      batch_size=batch_size,
                                      shuffle=True,
                                      num_workers=num_workers)
        # cad_test_loader = DataLoader(dataset=cad_test_data,
        #                              batch_size=batch_size,
        #                              shuffle=True,
        #                              num_workers=num_workers)
        log.logger.info("----- Loading data completed -----")

        # Target starts as an exact (frozen) copy of the online network.
        self.initialize_target_network()
        # NOTE(review): checkpoints are only saved once training accuracy
        # exceeds this hard-coded threshold (85) — confirm this is intended.
        max_acc = 85
        metrics = MetricFactory(self.classes, self.device)
        log.logger.info("----- Start training -----")
        for epoch in range(self.epochs):
            # train
            loss_rec = AverageMeter()
            loss_byol_rec = AverageMeter()
            loss_cls_rec = AverageMeter()
            start = time.time()
            for batch in cad_train_loader:
                x, label, _ = batch
                x0, x1, label = self.manage_batch_data(x, label)

                # Forward pass + metric accumulation; gradients flow
                # through `loss` only.
                loss, loss_byol, loss_cls = self.update(x0, x1, label, metrics)
                loss_rec.update(loss, self.batch_size)
                loss_byol_rec.update(loss_byol, self.batch_size)
                loss_cls_rec.update(loss_cls, self.batch_size)

                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                # EMA update of the target network after each optimizer step.
                self.update_target_network_parameters()

            run_time = time.time() - start
            self.log_record(epoch + 1, run_time, loss_byol_rec.avg, loss_cls_rec.avg, loss_rec.avg, metrics, 'train')

            # metrics.reset()

            # test
            # with torch.no_grad():
            #     loss_rec = AverageMeter()
            #     loss_byol_rec = AverageMeter()
            #     loss_cls_rec = AverageMeter()
            #     start = time.time()
            #     for batch in cad_test_loader:
            #         x, label, _ = batch
            #         x0, x1, label = self.manage_batch_data(x, label)

            #         loss, loss_byol, loss_cls = self.update(x0, x1, label, metrics)
            #         loss_rec.update(loss, self.batch_size)
            #         loss_byol_rec.update(loss_byol, self.batch_size)
            #         loss_cls_rec.update(loss_cls, self.batch_size)

            #     run_time = time.time() - start

            #     self.log_record(epoch + 1, run_time, loss_byol_rec.avg, loss_cls_rec.avg, loss_rec.avg, metrics, 'test')

            # Checkpoint before raising the running best, so the comparison
            # inside store_model() uses the previous best accuracy.
            self.store_model(metrics.get('ACC'), max_acc, epoch + 1)
            max_acc = max(metrics.get('ACC'), max_acc)

            # Metrics accumulate per epoch; reset after logging/saving.
            metrics.reset()

            self.scheduler.step()

        log.logger.info("Max ACC: {}".format(max_acc))
        log.logger.info("----- Complete the training -----")

    def update(self, x0: torch.Tensor, x1: torch.Tensor, label: torch.Tensor, metrics: MetricFactory):
        """Update parameters and metrics.

        Computes the symmetric BYOL regression loss between online
        predictions and (gradient-free) target projections, plus a
        supervised cross-entropy loss on the classification head.

        Args:
            x0: The first six views in each group of the 12 views, tensor shape of b * 6, 1, W, H.
            x1: The last six views in each group of the 12 views, tensor shape of b * 6, 1, W, H.
            label: The expanded label corresponds to x0 and x1, tensor shape of b * 6.
            metrics: ACC, NN, FT, ST, E, DCG, mAP.

        Returns:
            Tuple of (total loss, BYOL loss, classification loss); only
            the total loss is used for backpropagation by the caller.

        """
        # Online network returns (projection, class prediction) when
        # return_cls=True — presumably; verify against the model definition.
        projection1, prediction1 = self.online(x0, return_cls=True)
        projection2, prediction2 = self.online(x1, return_cls=True)
        prediction_view_1 = self.predictor(projection1)
        prediction_view_2 = self.predictor(projection2)

        # Target projections are treated as fixed regression targets.
        with torch.no_grad():
            targets_view_1 = self.target(x0)
            targets_view_2 = self.target(x1)

        # Symmetrized BYOL loss: each view predicts the other's target.
        loss_byol = regression_loss(prediction_view_1, targets_view_2)
        loss_byol += regression_loss(prediction_view_2, targets_view_1)
        loss_byol = loss_byol.mean()
        loss_cls = cross_entropy_loss(prediction1, label)
        loss_cls += cross_entropy_loss(prediction2, label)
        loss = loss_byol + loss_cls
        metrics.update(prediction1, label)
        metrics.update(prediction2, label)

        return loss, loss_byol, loss_cls

    def log_record(self, epoch: int, run_time, loss_byol: torch.Tensor, loss_cls: torch.Tensor, loss: torch.Tensor,
                   metrics: MetricFactory, mode: str):
        """Print logs to the console, record logs to log.txt file, record loss and accuracy to tensorboardX.

        Args:
            epoch: Current epoch.
            run_time: Run time of current epoch.
            loss_byol: BYOL loss.
            loss_cls: Classification loss.
            loss: Total loss = BYOL loss + classification loss.
            metrics: Accuracy, Recall, Precision.
            mode: 'train' or 'test'.        
        
        """
        m = metrics.compute()
        # NOTE(review): `mode` fills the first placeholder, so the header
        # renders as e.g. "Epoch-train: 001/100" — confirm this is intended.
        info = "Epoch-{}: {:03d}/{:03d}\t run_time:{:.4f}\tloss: {:.4f}\tloss_BYOL: {:.4f}\tloss_cls: {:.4f}\tACC:{:.4f}\tNN: {:.4f}\tFT: {:.4f}\tST: {:.4f}\tE: {:.4f}".format(
            mode, epoch, self.epochs, run_time, loss, loss_byol, loss_cls, m['ACC'], m['NN'], m['FT'], m['ST'], m['E'])
        if mode == 'test':
            info += '\n'
        log.logger.info(info)
        # Mirror every scalar to TensorBoard under a per-mode namespace.
        self.tb.add_scalar(mode + '/loss_byol', loss_byol, epoch)
        self.tb.add_scalar(mode + '/loss_cls', loss_cls, epoch)
        self.tb.add_scalar(mode + '/loss', loss, epoch)
        self.tb.add_scalar(mode + '/ACC', m['ACC'], epoch)
        self.tb.add_scalar(mode + '/NN', m['NN'], epoch)
        self.tb.add_scalar(mode + '/FT', m['FT'], epoch)
        self.tb.add_scalar(mode + '/ST', m['ST'], epoch)
        self.tb.add_scalar(mode + '/E', m['E'], epoch)

    def store_model(self, acc: float, max_acc: float, epoch: int):
        """Save the model as .pth file.
        
        Only saves when saving is enabled and `acc` strictly exceeds the
        best accuracy seen so far. The filename embeds epoch, accuracy
        and a timestamp, e.g. "12_86.5<YYYYmmddHHMMSS>_model.pth".

        Args:
            acc: Accuracy of current epoch.
            max_acc: Max accuracy.
            epoch: Current epoch.
        
        """
        if self.save_model and acc > max_acc:
            Path("checkpoints").mkdir(parents=True, exist_ok=True)
            # The predictor head is intentionally not checkpointed here —
            # TODO confirm it is not needed for feature extraction.
            torch.save(
                {
                    'online_network_state_dict': self.online.state_dict(),
                    'target_network_state_dict': self.target.state_dict(),
                    'optimizer_state_dict': self.optimizer.state_dict(),
                },
                os.path.join('.', 'checkpoints',
                             str(epoch) + '_' + str(acc) + str(time.strftime('%Y%m%d%H%M%S')) + '_model.pth'))

    def save_features(self, backbone: str = 'resnet-50', num_workers: int = 0, pretrained: bool = True):
        """Save features of 3D models' views to file.
        
        Args:
            backbone: Backbone of the network, 'resnet-50' or 'resnet-18'.
            num_workers: How many subprocesses to use for data loading. 0 means that the data will be loaded in the main process.
            pretrained: Whether to use pretrained model.

        """
        features_path = os.path.join('.', 'features')
        if not os.path.exists(features_path):
            os.makedirs(features_path)
        if pretrained:
            pth = None
            if backbone == 'resnet-50':
                # Dataset-specific checkpoints; only the part-shrec14 pair
                # is currently active — switch by (un)commenting.
                # pth = torch.load(os.path.join('.', 'pth', 'shrec14', 'shrec14_shape_resnet50_80.4832.pth'))
                # features_path = os.path.join(features_path, 'shrec14_shape_features_resnet50.pth')
                # pth = torch.load(os.path.join('.', 'pth', 'shrec13', 'shrec13_shape_resnet50_94.2303.pth'))
                # features_path = os.path.join(features_path, 'shrec13_shape_features_resnet50.pth')
                pth = torch.load(os.path.join('.', 'pth', 'part-shrec14', 'part-shape_resnet50_85.5540_model.pth'))
                features_path = os.path.join(features_path, 'by_part_shrec14_part_shape_features_resnet50.pth')
            else:
                # NOTE(review): the docstring mentions 'resnet-18' but no
                # checkpoint branch exists for it — confirm.
                log.logger.info("----- Wrong backbone input -----")
                return
            # strict=False: tolerate missing/unexpected keys in the checkpoint.
            self.online.load_state_dict(pth['online_network_state_dict'], False)
            self.target.load_state_dict(pth['target_network_state_dict'], False)

        log.logger.info("----- Start loading data -----")
        # No augmentation at feature-extraction time; normalization constants
        # must match the dataset the checkpoint was trained on.
        transform = transforms.Compose([
            transforms.ToTensor(),
            # shrec13
            # transforms.Normalize(mean=0.9859437942504883, std=0.1182224228978157),
            # shrec14-views
            # transforms.Normalize(mean=0.9675745964050293, std=0.18061445653438568),
            # shrec14-shapes
            transforms.Normalize(mean=0.9731981754302979, std=0.11050379276275635),
            # part-shrec14
            # transforms.Normalize(mean=0.9675745964050293, std=0.18061445653438568),
        ])
        cad_train_data = ModelViewDataset(transform=transform, split='train')
        cad_test_data = ModelViewDataset(transform=transform, split='test')
        # batch_size=1: one 3D model (12 views) per batch, pooled to one feature.
        cad_train_loader = DataLoader(dataset=cad_train_data, batch_size=1, num_workers=num_workers)
        cad_test_loader = DataLoader(dataset=cad_test_data, batch_size=1, num_workers=num_workers)
        log.logger.info("----- Loading data completed -----")

        log.logger.info("----- Start saving features -----")
        # Layout: features_memory[split][label] = [{model_id: feature}, ...]
        features_memory = {'train': {}, 'test': {}}
        self.push_features_memory(cad_train_loader, features_memory, 'train')
        self.push_features_memory(cad_test_loader, features_memory, 'test')
        torch.save(features_memory, features_path)
        log.logger.info("----- Saving features completed -----")

    @torch.no_grad()
    def push_features_memory(self, dataLoader: DataLoader, features_memory: dict, split: str):
        """Use Average pool to get features from dataLoader to features_memory.

        Args:
            dataLoader: `cad_train_loader` or `cad_test_loader`.
            features_memory: Dictionary of features.
            split: 'train' or 'test'.
        
        """
        for batch in tqdm(dataLoader):
            # NOTE(review): `id` shadows the builtin of the same name;
            # harmless within this loop but worth renaming eventually.
            x, label, id = batch
            x0, x1, _ = self.manage_batch_data(x, label)
            _, f0 = self.online(x0, return_features=True)
            _, f1 = self.online(x1, return_features=True)
            id = id[0]
            label = str(label.item())
            # join 12 features together
            f = torch.cat((f0, f1), 0)
            # average pool
            f = torch.mean(f, 0, True)
            if label not in features_memory[split]:
                features_memory[split][label] = []
            features_memory[split][label].append({id: f})