import multiprocessing
from joblib import Parallel, delayed
import os
from pathlib import Path
import time

import numpy as np
from sqlalchemy import false
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter

from dataset import *
from utils import *

# Module-wide logger writing to ./log/log_metric.txt (Logger is provided by utils).
log = Logger(os.path.join('.', 'log', 'log_metric.txt'))


class MetricTrainer:
    """Trainer for the metric-learning stage of sketch-based 3D shape retrieval.

    Jointly optimizes a sketch backbone and a CAD (3D shape) backbone with
    triplet losses over pre-extracted features, evaluates retrieval mAP each
    epoch, and checkpoints the best-performing weights.

    Attributes:
        sk_backbone: Sketch embedding network (with projection/prediction head).
        cad_backbone: 3D shape embedding network (with projection/prediction head).
        sk_optimizer / cad_optimizer: Optimizers for the two backbones.
        sk_scheduler / cad_scheduler: LR schedulers stepped once per epoch.
        device: 'cuda' or 'cpu'.
        epochs: Number of training epochs.
        classes: Total number of classes.
        batch_size: Mini-batch size.
        save_model: Whether to checkpoint improved models.
        num_workers: DataLoader worker count.
        tb: tensorboardX writer logging to ./events_metric.
    """
    def __init__(self, sk_backbone: nn.Module, cad_backbone: nn.Module, sk_optimizer: torch.optim.Optimizer,
                 cad_optimizer: torch.optim.Optimizer, sk_scheduler: torch.optim.lr_scheduler._LRScheduler,
                 cad_scheduler: torch.optim.lr_scheduler._LRScheduler, device: str,
                 classes: int, params) -> None:
        # params is an argparse-style namespace; it must provide epochs,
        # batch_size, save_model and num_workers.
        log.logger.info(params)
        self.tb = SummaryWriter(os.path.join('.', 'events_metric'))
        self.sk_backbone = sk_backbone
        self.cad_backbone = cad_backbone
        self.sk_optimizer = sk_optimizer
        self.cad_optimizer = cad_optimizer
        self.sk_scheduler = sk_scheduler
        self.cad_scheduler = cad_scheduler
        self.device = device
        self.epochs = params.epochs
        self.classes = classes
        self.batch_size = params.batch_size
        self.save_model = params.save_model
        self.num_workers = params.num_workers

    def train(self):
        """Train both backbones with triplet losses and evaluate mAP per epoch.

        Loads pre-extracted ResNet-50 features for sketches and 3D shapes,
        runs the triplet-loss optimization loop, evaluates retrieval mAP on
        the train and test splits every epoch, checkpoints the best model via
        ``store_model`` and steps both LR schedulers.
        """
        log.logger.info("----- Start loading data -----")
        # Pre-extracted backbone features; switch to a commented pair below to
        # run on a different benchmark.
        # sk_feature = os.path.join('.', 'features', 'by_part_shrec14_sketch_features_resnet50.pth')
        # cad_feature = os.path.join('.', 'features', 'by_part_shrec14_shape_features_resnet50.pth')
        sk_feature = os.path.join('.', 'features', 'shrec14_sketch_features_resnet50.pth')
        cad_feature = os.path.join('.', 'features', 'shrec14_shape_features_resnet50.pth')
        # sk_feature = os.path.join('.', 'features', 'shrec13_sketch_features_resnet50.pth')
        # cad_feature = os.path.join('.', 'features', 'shrec13_shape_features_resnet50.pth')
        # sk_feature = os.path.join('.', 'features', 'part-shrec14_sketch_features_resnet50.pth')
        # cad_feature = os.path.join('.', 'features', 'part-shrec14_shape_features_resnet50.pth')

        sk_feature_train_data = FeatureDataset(sk_feature, 'train')
        sk_feature_test_data = FeatureDataset(sk_feature, 'test')
        sk_feature_train_loader = DataLoader(dataset=sk_feature_train_data,
                                             batch_size=self.batch_size,
                                             shuffle=True,
                                             num_workers=self.num_workers)
        sk_feature_test_loader = DataLoader(dataset=sk_feature_test_data,
                                            batch_size=self.batch_size,
                                            shuffle=True,
                                            num_workers=self.num_workers)
        cad_feature_train_data = FeatureDataset(cad_feature, 'train')
        cad_feature_test_data = FeatureDataset(cad_feature, 'test')
        cad_feature_train_loader = DataLoader(dataset=cad_feature_train_data,
                                              batch_size=self.batch_size,
                                              shuffle=True,
                                              num_workers=self.num_workers)
        cad_feature_test_loader = DataLoader(dataset=cad_feature_test_data,
                                             batch_size=self.batch_size,
                                             shuffle=True,
                                             num_workers=self.num_workers)
        log.logger.info("----- Loading data completed -----")

        max_mAP = 0.1

        log.logger.info("----- Start training -----")
        for epoch in range(self.epochs):
            # --- train ---
            loss_rec = AverageMeter()
            loss_tri_sk_rec = AverageMeter()
            loss_tri_cad_rec = AverageMeter()
            loss_tri_mix_rec = AverageMeter()
            start = time.time()
            for batch in cad_feature_train_loader:
                # x: (batch, 2048) shape features; label: (batch,)
                x, label, _ = batch
                pos_cad, neg_cad = cad_feature_train_data.get_contrast_sample(label)
                pos_sk, neg_sk = sk_feature_train_data.get_contrast_sample(label)

                loss, loss_tri_sk, loss_tri_cad, loss_tri_mix = self.update(x, pos_cad, neg_cad, pos_sk, neg_sk, label)

                self.sk_optimizer.zero_grad()
                self.cad_optimizer.zero_grad()
                loss.backward()
                self.sk_optimizer.step()
                self.cad_optimizer.step()

                # Record detached python scalars. Storing the loss tensors
                # themselves would keep each batch's autograd graph alive in
                # the AverageMeters and steadily leak memory.
                loss_rec.update(loss.item(), self.batch_size)
                loss_tri_sk_rec.update(loss_tri_sk.item(), self.batch_size)
                loss_tri_cad_rec.update(loss_tri_cad.item(), self.batch_size)
                loss_tri_mix_rec.update(loss_tri_mix.item(), self.batch_size)
            run_time = time.time() - start
            mAP_train = self.test(sk_feature_train_loader, cad_feature_train_loader)
            self.log_record(epoch + 1, run_time, loss_rec.avg, loss_tri_sk_rec.avg, loss_tri_cad_rec.avg,
                            loss_tri_mix_rec.avg, mAP_train, 'train')

            # --- test ---
            start = time.time()
            mAP_test = self.test(sk_feature_test_loader, cad_feature_test_loader)
            run_time = time.time() - start

            self.log_record(epoch + 1, run_time, None, None, None, None, mAP_test, 'test')
            self.store_model(mAP_test, max_mAP, epoch + 1)
            max_mAP = max(max_mAP, mAP_test)

            self.sk_scheduler.step()
            self.cad_scheduler.step()

        log.logger.info("Max mAP: {}".format(max_mAP))
        log.logger.info("----- Complete the training -----")

    def update(self, anchor_cad: torch.tensor, pos_cad: torch.tensor, neg_cad: torch.tensor, pos_sk: torch.tensor,
               neg_sk: torch.tensor, label: torch.tensor):
        """Update parameters and metrics.
        Args:
            x: Query sketches, tensor shape of b, 1, W, H.
            label: Labels correspond to x, tensor shape of b.
            metrics: ACC, NN, FT, ST, E, DCG, mAP.
        """

        anchor_cad = anchor_cad.to(self.device)
        pos_cad = pos_cad.to(self.device)
        neg_cad = neg_cad.to(self.device)
        pos_sk = pos_sk.to(self.device)
        neg_sk = neg_sk.to(self.device)
        label = label.to(self.device)
        anchor_cad_f = self.cad_backbone(anchor_cad)
        pos_cad_f, neg_cad_f = self.cad_backbone(pos_cad), self.cad_backbone(neg_cad)
        pos_sk_f, neg_sk_f = self.sk_backbone(pos_sk), self.sk_backbone(neg_sk)
        loss_tri_sk = triplet_margin_loss(anchor_cad_f, pos_sk_f, neg_sk_f)
        loss_tri_cad = triplet_margin_loss(anchor_cad_f, pos_cad_f, neg_cad_f)
        loss_tri_mix = triplet_margin_loss(pos_sk_f, pos_cad_f, neg_cad_f)
        loss = loss_tri_sk + loss_tri_mix + loss_tri_cad
        return loss, loss_tri_sk, loss_tri_cad, loss_tri_mix

    @torch.no_grad()
    def test(self, sk_feature_loader, cad_feature_loader, get_visual=False):
        """Evaluate sketch-to-shape retrieval mAP on the given loaders.

        Embeds every sketch and shape (as numpy arrays), builds a similarity
        matrix from euclidean distances, and computes average precision per
        sketch in parallel with joblib.

        Args:
            sk_feature_loader: Loader of (features, labels, ids) sketch batches.
            cad_feature_loader: Loader of (features, labels, ids) shape batches.
            get_visual: Also dump a per-sketch top-10 retrieval table to
                ./pth/shrec14/retrieval_result.pth for visualization.

        Returns:
            The best mean AP found over the swept cutoff values ``m``.
        """
        if get_visual:
            sk_f, sk_l, sk_id, cad_f, cad_l, cad_id = self.get_metric_features(sk_feature_loader, cad_feature_loader,
                                                                               True, True)
        else:
            sk_f, sk_l, cad_f, cad_l = self.get_metric_features(sk_feature_loader, cad_feature_loader, True)
        # Euclidean measure between every sketch and every shape embedding.
        distance = get_euclidean_distance(sk_f, cad_f, 'euclidean')
        # Monotone-decreasing map of distance into (0, 1]: smaller distance -> higher similarity.
        sim = 1 / (1 + distance)
        # Ground-truth relevance matrix: 1 where sketch and shape share a class.
        label_sim = (np.expand_dims(sk_l, axis=1) == np.expand_dims(cad_l, axis=0)) * 1
        # print(label_sim)
        # mAP
        n = label_sim.shape[0]
        num_cores = min(multiprocessing.cpu_count(), 32)
        max_map = 0
        # Sweep the third argument of get_average_precision (presumably a
        # retrieval-list cutoff -- confirm against utils) and keep the best mAP.
        for m in range(15):
            aps_10 = Parallel(n_jobs=num_cores)(delayed(get_average_precision)(label_sim[i], sim[i], m)
                                                for i in range(n))
            # aps_all = Parallel(n_jobs=num_cores)(delayed(get_average_precision)(label_sim[i], sim[i], 0) for i in range(n))
            # aps_32 = Parallel(n_jobs=num_cores)(delayed(get_average_precision)(label_sim[i], sim[i], 32) for i in range(n))
            aps_10 = np.array(aps_10)
            if get_visual:
                # Top-10 most similar shapes per sketch.
                # NOTE(review): argsort is ascending, so these ten ids come out
                # ordered least-to-most similar -- confirm that is intended.
                rank = torch.argsort(torch.from_numpy(sim))[:, -10:]
                sk_load = os.path.join('.', 'labels', 'SHREC14-EDGE', 'sk_edge.hdf5')
                sk_pd = pd.read_hdf(sk_load, 'sk')
                labels = sorted(list(set(sk_pd['cat'])))
                retrieval_res = {'sk_id': [], 'class': [], 'mAp': [], 'cad_id': []}
                for i in range(aps_10.shape[0]):
                    # if aps_10[i] >= 0.8 or aps_10[i] <= 0.2:
                    retrieval_res['mAp'].append(aps_10[i])
                    retrieval_res['class'].append(labels[sk_l[i]])
                    retrieval_res['sk_id'].append(sk_id[i])
                    retrieval_res['cad_id'].append(cad_id[rank[i]])
                frame = pd.DataFrame(retrieval_res)
                torch.save(frame, os.path.join('.', 'pth', 'shrec14', 'retrieval_result.pth'))
            # aps_10[np.isnan(aps_10)] = 0
            # aps_10 = aps_10[not np.isnan(aps_10)]
            # Drop NaN APs (sketches with no relevant shapes) before averaging.
            index = np.isnan(aps_10)
            index = ~index
            aps_10 = np.mean(aps_10[index])
            # aps_10 = np.mean(aps_10)
            print(m, aps_10)
            max_map = max(max_map, aps_10)

        # aps_all = np.array(aps_all)
        # aps_all[np.isnan(aps_all)] = 0
        # aps_all = np.mean(aps_all)

        # aps_32 = np.array(aps_32)
        # aps_32[np.isnan(aps_32)] = 0
        # aps_32 = np.mean(aps_32)
        # log.logger.info('mAP@all = {}, mAP@10 = {}, mAP@32 = {}'.format(aps_all, aps_10, aps_32))
        # return max(aps_10, aps_32, aps_all)
        return max_map

    def getHardestSamples(self, anchors: torch.tensor, anchors_label: torch.tensor, data: Dataset, dataset: str):
        anchors_samples, pos_samples, neg_samples = [], [], []
        # extract positive and negative data
        for a, a_l in zip(anchors, anchors_label):
            _a = a
            pos_label = a_l.item()
            pos_data = data.mapping[pos_label]
            neg_data = data.neg_mapping[pos_label]
            # get final features
            if dataset == 'sketch':
                _a = _a.unsqueeze(0)
                _a = self.sk_backbone(_a)
                pos_data = self.sk_backbone(pos_data)
                neg_data = self.sk_backbone(neg_data)
            else:
                pos_data = self.cad_backbone(pos_data)
                neg_data = self.cad_backbone(neg_data)
            # euclidean_distance
            criteria = nn.PairwiseDistance(p=2)
            # hardest positive sample
            dis_pos = criteria(_a, pos_data)
            idx_pos = dis_pos.argsort()[-1]
            pos_samples.append(pos_data[idx_pos])
            # hardest negtive sample
            dis_neg = criteria(_a, neg_data)
            idx_neg = dis_neg.argsort()[0]
            neg_samples.append(neg_data[idx_neg])
            anchors_samples.append(_a)
        # print(dataset, 'pos:', torch.max(dis_pos), dis_pos[idx_pos])
        # print(dataset, 'neg:', torch.min(dis_neg), dis_neg[idx_neg])
        pos_samples = torch.stack(pos_samples)
        neg_samples = torch.stack(neg_samples)
        anchors_samples = torch.stack(anchors_samples).squeeze()
        return anchors_samples, pos_samples, neg_samples

    def log_record(self, epoch: int, run_time, loss: torch.tensor, loss_tri_sk: torch.tensor,
                   loss_tri_cad: torch.tensor, loss_tri_mix: torch.tensor, mAP: float, mode: str):
        """Log one epoch's metrics to the console/file logger and tensorboardX.

        Args:
            epoch: 1-based epoch number.
            run_time: Wall-clock duration of the phase, in seconds.
            loss: Total loss average (only used when mode == 'train').
            loss_tri_sk: Sketch triplet-loss average (train only).
            loss_tri_cad: CAD triplet-loss average (train only).
            loss_tri_mix: Mixed triplet-loss average (train only).
            mAP: Mean average precision of this epoch.
            mode: 'train' or 'test'.
        """
        if mode == 'train':
            info = "Epoch-{}: {:03d}/{:03d}\trun_time:{:.4f}\tloss: {:.4f}\tloss_tri_sk: {:.4f}\tloss_tri_cad: {:.4f}\tloss_tri_mix: {:.4f}\tmAP: {:.4f}".format(
                mode, epoch, self.epochs, run_time, loss, loss_tri_sk, loss_tri_cad, loss_tri_mix, mAP)
            scalars = {
                'loss': loss,
                'loss_tri_sk': loss_tri_sk,
                'loss_tri_cad': loss_tri_cad,
                'loss_tri_mix': loss_tri_mix,
                'mAP': mAP,
            }
        else:
            info = "Epoch-{}: {:03d}/{:03d}\t run_time:{:.4f}\tmAP: {:.4f}\n".format(
                mode, epoch, self.epochs, run_time, mAP)
            scalars = {'mAP': mAP}
        # Dict insertion order preserves the original add_scalar call order.
        for tag, value in scalars.items():
            self.tb.add_scalar(mode + '/' + tag, value, epoch)
        log.logger.info(info)

    def store_model(self, mAP: float, max_mAP: float, epoch: int):
        """Checkpoint both backbones and optimizers when mAP improves.

        Args:
            mAP: Mean average precision of the current epoch.
            max_mAP: Best mAP observed so far; nothing is saved unless the
                current mAP strictly exceeds it.
            epoch: Current (1-based) epoch number, embedded in the filename.
        """
        # Guard clause: only persist strictly-improved models when enabled.
        if not self.save_model or mAP <= max_mAP:
            return
        Path("checkpoints").mkdir(parents=True, exist_ok=True)
        checkpoint = {
            'metric_cad_backbone_state_dict': self.cad_backbone.state_dict(),
            'metric_sk_backbone_state_dict': self.sk_backbone.state_dict(),
            'optimizer_sk_state_dict': self.sk_optimizer.state_dict(),
            'optimizer_cad_state_dict': self.cad_optimizer.state_dict(),
        }
        filename = str(epoch) + '_' + str(mAP) + str(time.strftime('%Y%m%d%H%M%S')) + '_metric.pth'
        torch.save(checkpoint, os.path.join('.', 'checkpoints', filename))

    def get_metric_features(self,
                            sk_feature_loader: DataLoader,
                            cad_feature_loader: DataLoader,
                            get_numpy: bool = False,
                            get_visual: bool = False):
        # sketch features
        for i, batch in enumerate(sk_feature_loader):
            x, label, id = batch
            out_f = self.sk_backbone(x)
            if i == 0:
                if get_numpy:
                    sk_f = out_f.to('cpu').numpy()
                    sk_l = label.to('cpu').numpy()
                else:
                    sk_f = out_f
                    sk_l = label
                if get_visual:
                    sk_id = id
            else:
                if get_numpy:
                    sk_f = np.concatenate((sk_f, out_f.to('cpu').numpy()), axis=0)
                    sk_l = np.concatenate((sk_l, label.to('cpu').numpy()), axis=0)
                else:
                    sk_f = torch.cat((sk_f, out_f), axis=0)
                    sk_l = torch.cat((sk_l, label), axis=0)
                if get_visual:
                    sk_id += id

        # 3D shape features
        for i, batch in enumerate(cad_feature_loader):
            x, label, id = batch
            out_f = self.cad_backbone(x)
            if i == 0:
                if get_numpy:
                    cad_f = out_f.to('cpu').numpy()
                    cad_l = label.to('cpu').numpy()
                else:
                    cad_f = out_f
                    cad_l = label
                if get_visual:
                    cad_id = id
            else:
                if get_numpy:
                    cad_f = np.concatenate((cad_f, out_f.to('cpu').numpy()), axis=0)
                    cad_l = np.concatenate((cad_l, label.to('cpu').numpy()), axis=0)
                else:
                    cad_f = torch.cat((cad_f, out_f), axis=0)
                    cad_l = torch.cat((cad_l, label), axis=0)
                if get_visual:
                    cad_id += id
        if get_visual:
            return sk_f, sk_l, np.array(sk_id), cad_f, cad_l, np.array(cad_id)
        return sk_f, sk_l, cad_f, cad_l

    def save_features(self, num_workers: int = 0):
        """Embed all features with the trained metric backbones and save them.

        Loads a metric-learning checkpoint into both backbones, re-runs the
        retrieval evaluation on both splits as a sanity check, then writes the
        embedded train/test sketch and shape features to
        ``./features/metric_features.pth``.

        Args:
            num_workers: How many subprocesses to use for data loading.
                0 means the data is loaded in the main process.
        """
        # Load the pretrained metric-learning checkpoint into both backbones.
        # pth = torch.load(os.path.join('.', 'pth', 'part-shrec14', 'part-metric_0.6102_model.pth'))
        pth = torch.load(os.path.join('.', 'pth', 'shrec14', 'shrec14_metric_resnet50_0.2073.pth'))
        self.sk_backbone.load_state_dict(pth['metric_sk_backbone_state_dict'])
        self.cad_backbone.load_state_dict(pth['metric_cad_backbone_state_dict'])

        log.logger.info("----- Start loading data -----")
        sk_feature = os.path.join('.', 'features', 'shrec14_sketch_features_resnet50.pth')
        cad_feature = os.path.join('.', 'features', 'shrec14_shape_features_resnet50.pth')
        # sk_feature = os.path.join('.', 'features', 'part-shrec14_sketch_features_resnet50.pth')
        # cad_feature = os.path.join('.', 'features', 'part-shrec14_shape_features_resnet50.pth')
        sk_feature_train_data = FeatureDataset(sk_feature, 'train')
        sk_feature_test_data = FeatureDataset(sk_feature, 'test')
        sk_feature_train_loader = DataLoader(dataset=sk_feature_train_data,
                                             batch_size=self.batch_size,
                                             shuffle=True,
                                             num_workers=num_workers)
        sk_feature_test_loader = DataLoader(dataset=sk_feature_test_data,
                                            batch_size=self.batch_size,
                                            shuffle=True,
                                            num_workers=num_workers)
        cad_feature_train_data = FeatureDataset(cad_feature, 'train')
        cad_feature_test_data = FeatureDataset(cad_feature, 'test')
        cad_feature_train_loader = DataLoader(dataset=cad_feature_train_data,
                                              batch_size=self.batch_size,
                                              shuffle=True,
                                              num_workers=num_workers)
        cad_feature_test_loader = DataLoader(dataset=cad_feature_test_data,
                                             batch_size=self.batch_size,
                                             shuffle=True,
                                             num_workers=num_workers)
        log.logger.info("----- Loading data completed -----")

        # Sanity check: the loaded checkpoint should reproduce its recorded mAP.
        mAP_check = self.test(sk_feature_train_loader, cad_feature_train_loader)
        log.logger.info('check mAP: ' + str(mAP_check))
        mAP_check = self.test(sk_feature_test_loader, cad_feature_test_loader)
        log.logger.info('check mAP: ' + str(mAP_check))

        log.logger.info("----- Start saving features -----")
        # NOTE(review): the original returned before this point (debug
        # leftover) and unpacked six values from a four-value call; both are
        # fixed so the function actually saves features as documented.
        sk_train_f, sk_train_l, cad_train_f, cad_train_l = self.get_metric_features(
            sk_feature_train_loader, cad_feature_train_loader)
        sk_test_f, sk_test_l, cad_test_f, cad_test_l = self.get_metric_features(
            sk_feature_test_loader, cad_feature_test_loader)
        # Persist everything needed by downstream retrieval/analysis scripts.
        features_memory = {
            'sk_train_f': sk_train_f,
            'sk_train_l': sk_train_l,
            'sk_test_f': sk_test_f,
            'sk_test_l': sk_test_l,
            'cad_train_f': cad_train_f,
            'cad_train_l': cad_train_l,
            'cad_test_f': cad_test_f,
            'cad_test_l': cad_test_l,
        }
        features_path = os.path.join('.', 'features')
        os.makedirs(features_path, exist_ok=True)
        torch.save(features_memory, os.path.join(features_path, 'metric_features.pth'))
        log.logger.info("----- Saving features completed -----")