import os
import pickle
from collections import OrderedDict
from random import sample

import cv2
import numpy as np
import torch
import torch.nn as nn
from scipy.ndimage import gaussian_filter
from scipy.spatial.distance import mahalanobis
from sklearn.metrics import roc_auc_score
from sklearn.metrics import precision_recall_curve
from torch.nn import functional as F
from torchvision import transforms
from torchvision.models import resnet18, wide_resnet50_2
from tqdm import tqdm

from utils.embedding_utils import *
from utils.metrics import cal_confusion_matrix, estimate_thr_recall
from utils.metrics import AverageMeter, estimate_thr_by_list
from .base import BaseModel

inv_normalize = transforms.Normalize(mean=[-0.485/0.229, -0.456/0.224, -0.406/0.255], std=[1/0.229, 1/0.224, 1/0.255])

class PaDiM(BaseModel):
    """PaDiM anomaly detection and localization.

    Fits a multivariate Gaussian (mean, covariance) per spatial patch over
    concatenated multi-scale features of a frozen ImageNet backbone, then
    scores test patches by Mahalanobis distance to that distribution.
    The backbone itself is never trained; features are captured via
    forward hooks.
    """

    def __init__(self, cfg):
        # NOTE(review): super(BaseModel, self) skips BaseModel.__init__ and
        # calls the next class in the MRO (presumably nn.Module) directly.
        # Kept as-is because BaseModel.__init__'s signature/side effects are
        # not visible here -- confirm this skip is intentional.
        super(BaseModel, self).__init__()

        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'

        self.cfg = cfg
        self.init_model()
        self.init_results_list()
        self.embedding_dir_path, self.sample_path = prep_dirs(self.cfg['results_dir'])

    def forward(self, x):
        # PaDiM has no trainable forward pass; the backbone is only queried
        # through forward hooks, so calling forward() is a usage error.
        raise NotImplementedError

    def hook(self, module, input, output):
        # Forward hook: buffer the intermediate feature map of each
        # registered layer for the most recent forward pass.
        self.embedding.append(output)

    def init_model(self):
        """Build the frozen pretrained backbone and register feature hooks.

        Sets ``self.t_d`` (total channels of layer1+layer2+layer3 features)
        and ``self.d`` (number of randomly kept channels), following the
        PaDiM paper's defaults for each backbone.
        """
        if self.cfg['backbone'] == 'resnet18':
            self.model = resnet18(pretrained=True, progress=True)
            self.t_d = 448   # 64 + 128 + 256 channels from layers 1-3
            self.d = 100
        elif self.cfg['backbone'] == 'wide_resnet50_2':
            self.model = wide_resnet50_2(pretrained=True, progress=True)
            self.t_d = 1792  # 256 + 512 + 1024 channels from layers 1-3
            self.d = 550
        # Random channel-subsampling indices. Train and test must use the
        # same idx; this holds within one process since idx is drawn once
        # here, but it is NOT saved with the embedding pickle -- training
        # and testing in separate runs would pick different channels.
        # TODO(review): persist idx alongside the learned distribution.
        self.idx = torch.tensor(sample(range(0, self.t_d), self.d))

        self.model.to(self.device)
        self.model.eval()
        self.model.layer1[-1].register_forward_hook(self.hook)
        self.model.layer2[-1].register_forward_hook(self.hook)
        self.model.layer3[-1].register_forward_hook(self.hook)
        self.model.avgpool.register_forward_hook(self.hook)

    def init_results_list(self):
        """Reset all per-run accumulators (labels, scores, feature buffers)."""
        self.gt_list_px_lvl = []        # per-pixel ground-truth masks
        self.pred_list_px_lvl = []      # per-pixel anomaly scores
        self.gt_list_img_lvl = []       # per-image labels
        self.pred_list_img_lvl = []     # per-image anomaly scores
        self.img_path_list = []
        self.img_list = []
        self.embedding = []             # forward-hook buffer
        self.train_outputs = OrderedDict([('layer1', []), ('layer2', []), ('layer3', [])])
        self.test_outputs = OrderedDict([('layer1', []), ('layer2', []), ('layer3', [])])

    def train(self, dataloader):
        """Fit per-patch Gaussian statistics on (assumed normal) train data.

        Side effect: replaces ``self.train_outputs`` with ``[mean, cov]``
        where mean is (C, H*W) and cov is (C, C, H*W).
        """
        for imgs, _, _, _ in tqdm(dataloader):
            imgs = imgs.to(self.device)
            with torch.no_grad():
                _ = self.model(imgs)
            # zip over the 3 layer keys intentionally drops the 4th hooked
            # output (avgpool); clearing the buffer keeps batches aligned.
            for k, v in zip(self.train_outputs.keys(), self.embedding):
                self.train_outputs[k].append(v)
            self.embedding = []
        # Concatenate the per-batch feature maps of each layer.
        for k, v in self.train_outputs.items():
            self.train_outputs[k] = torch.cat(v, 0)
        embedding_vectors = self.train_outputs['layer1'].cpu()

        # Upsample-and-concat layer2/layer3 features onto layer1's grid.
        for layer_name in ['layer2', 'layer3']:
            self.train_outputs[layer_name] = self.train_outputs[layer_name].cpu()
            embedding_vectors = embedding_concat(embedding_vectors, self.train_outputs[layer_name])

        # Randomly keep d of the t_d channels (PaDiM dimensionality reduction).
        embedding_vectors = torch.index_select(embedding_vectors, 1, self.idx)

        # Fit a multivariate Gaussian per spatial position.
        B, C, H, W = embedding_vectors.size()
        embedding_vectors = embedding_vectors.view(B, C, H * W)
        mean = torch.mean(embedding_vectors, dim=0).numpy()
        cov = torch.zeros(C, C, H * W).numpy()
        I = np.identity(C)
        for i in range(H * W):
            # 0.01 * I regularizes the sample covariance so it stays
            # invertible (the paper's epsilon term).
            cov[:, :, i] = np.cov(embedding_vectors[:, :, i].numpy(), rowvar=False) + 0.01 * I
        # Save the learned distribution.
        self.train_outputs = [mean, cov]

    def train_after(self, c):
        """Persist the learned [mean, cov] distribution for class ``c``."""
        with open(os.path.join(self.embedding_dir_path, f'embedding_{c}.pickle'), 'wb') as f:
            pickle.dump(self.train_outputs, f)

    def test_after(self, test_dataloader, c):
        """Score a test set of class ``c`` against the saved distribution.

        Builds pixel-level anomaly maps (Mahalanobis distance, upsampled and
        Gaussian-smoothed) and image-level scores (max pixel score), then
        estimates a pixel threshold and writes visualizations.
        """
        # Use a context manager so the pickle file handle is not leaked.
        with open(os.path.join(self.embedding_dir_path, f'embedding_{c}.pickle'), 'rb') as f:
            self.train_outputs = pickle.load(f)
        for index, data in enumerate(tqdm(test_dataloader)):
            x, y, mask, path = data
            x = x.to(self.device)

            with torch.no_grad():
                _ = self.model(x.to(self.device))
            # Get intermediate layer outputs (avgpool output is dropped by
            # the 3-key zip, as in train()).
            for k, v in zip(self.test_outputs.keys(), self.embedding):
                self.test_outputs[k].append(v)
            # Bug fix: the hook buffer was never cleared here (train() does),
            # so zip() kept re-reading the FIRST batch's features for every
            # subsequent batch.
            self.embedding = []

            self.img_list.extend(x.cpu().numpy())
            self.gt_list_px_lvl.extend(mask.cpu().numpy())
            self.gt_list_img_lvl.extend(y.cpu().numpy())
            self.img_path_list.extend(path)  # add class

        for k, v in self.test_outputs.items():
            self.test_outputs[k] = torch.cat(v, 0)

        embedding_vectors = self.test_outputs['layer1'].cpu()
        for layer_name in ['layer2', 'layer3']:
            self.test_outputs[layer_name] = self.test_outputs[layer_name].cpu()
            embedding_vectors = embedding_concat(embedding_vectors, self.test_outputs[layer_name])

        # Select the same d random channels chosen at training time.
        embedding_vectors = torch.index_select(embedding_vectors, 1, self.idx)

        # Mahalanobis distance of every test patch to the train Gaussian.
        B, C, H, W = embedding_vectors.size()
        embedding_vectors = embedding_vectors.view(B, C, H * W).numpy()
        dist_list = []
        for i in range(H * W):
            mean = self.train_outputs[0][:, i]
            # Invert the (regularized) covariance of this spatial position.
            conv_inv = np.linalg.inv(self.train_outputs[1][:, :, i])
            # Renamed loop variable: it previously shadowed random.sample.
            dist = [mahalanobis(emb[:, i], mean, conv_inv) for emb in embedding_vectors]
            dist_list.append(dist)

        dist_list = np.array(dist_list).transpose(1, 0).reshape(B, H, W)

        # Upsample the patch-level map to input resolution.
        # NOTE(review): size=x.size(2) assumes square inputs -- confirm.
        # squeeze(1) (not squeeze()) so a batch of 1 keeps its batch dim.
        dist_list = torch.tensor(dist_list)
        score_map = F.interpolate(dist_list.unsqueeze(1), size=x.size(2), mode='bilinear',
                                  align_corners=False).squeeze(1).numpy()

        # Apply Gaussian smoothing on each per-image score map.
        for i in range(score_map.shape[0]):
            score_map[i] = gaussian_filter(score_map[i], sigma=4)
        # Image-level score = max pixel score of the image.
        img_scores = score_map.reshape(score_map.shape[0], -1).max(axis=1)

        self.pred_list_px_lvl = score_map
        self.pred_list_img_lvl = img_scores

        # Normalize scores to [0, 1] before thresholding/visualization.
        self.pred_list_px_lvl = self.min_max_norm(np.array(self.pred_list_px_lvl))
        self.pred_list_img_lvl = self.min_max_norm(np.array(self.pred_list_img_lvl))
        self.gt_list_img_lvl = np.array(self.gt_list_img_lvl)
        self.gt_list_px_lvl = np.array(self.gt_list_px_lvl)

        threshold = self.est_thresh(self.gt_list_px_lvl.flatten().astype('uint8'), self.pred_list_px_lvl.flatten())
        visualize_loc_result(self.img_list, self.img_path_list, self.gt_list_px_lvl, self.pred_list_px_lvl, threshold, self.sample_path, c, self.cfg['n_viz'])

    def evaluate(self, c):
        """Compute pixel/image AUROC and thresholded metrics for class ``c``.

        Appends the metric dict to ``results_<method>.txt`` in the results dir.
        """
        try:
            pixel_auc = roc_auc_score(self.gt_list_px_lvl, self.pred_list_px_lvl)
        except ValueError:
            # The pixel arrays are (N, H, W); sklearn needs flat 1-D vectors.
            # (Was a bare except; ValueError is what roc_auc_score raises.)
            gt_list_px_lvl = np.array(self.gt_list_px_lvl).flatten()
            pred_list_px_lvl = np.array(self.pred_list_px_lvl).flatten()
            pixel_auc = roc_auc_score(gt_list_px_lvl, pred_list_px_lvl)

        print("Total pixel-level auc-roc score :", pixel_auc)
        img_auc = roc_auc_score(self.gt_list_img_lvl, self.pred_list_img_lvl)
        print("Total image-level auc-roc score :", img_auc)
        print('test_epoch_end')
        values = {'pixel_auc': round(pixel_auc, 4), 'img_auc': round(img_auc, 4)}

        # Estimate the image-level decision threshold from recall.
        best_thr = estimate_thr_recall(self.gt_list_img_lvl, self.pred_list_img_lvl)

        print('best_thr is:', best_thr)

        # Thresholded confusion-matrix metrics at the estimated threshold.
        # NOTE(review): the 'noraml_recall' key typo is preserved because it
        # is written to the results file and may be parsed downstream.
        noraml_recall, abnormal_recall, precision, false_p, false_n = \
            cal_confusion_matrix(self.gt_list_img_lvl, self.pred_list_img_lvl, img_path_list=self.img_path_list, thresh=best_thr)
        values["noraml_recall"] = round(noraml_recall, 4)
        values["abnormal_recall"] = round(abnormal_recall, 4)
        values["precision"] = round(precision, 4)
        # add best thresh
        values["best_thr"] = round(best_thr, 6)
        values["false_p"] = false_p
        values["false_n"] = false_n

        print('test end')

        with open(os.path.join(self.cfg['results_dir'], 'results_{}.txt'.format(self.cfg['method'])), 'a') as f:
            f.write(c + ' : ' + str(values) + '\n')