import torch
from torch.nn import functional as F
import torch.nn as nn
import cv2
import pickle
import os
from sklearn.metrics import roc_auc_score
from sklearn.metrics import precision_recall_curve
from utils.metrics import cal_confusion_matrix, estimate_thr_recall
from torchvision import transforms
from utils.embedding_utils import *
from .base import BaseModel
from torchvision.models import wide_resnet50_2
from collections import OrderedDict
from tqdm import tqdm
from scipy.ndimage import gaussian_filter
from utils.metrics import AverageMeter, estimate_thr_by_list


class SPADE(BaseModel):
    """SPADE anomaly detector (sub-image anomaly detection via feature
    correspondences).

    A frozen, ImageNet-pretrained WideResNet-50-2 is used purely as a
    feature extractor: forward hooks capture layer1/2/3 feature maps
    (pixel-level scoring) and the avgpool output (image-level scoring).
    Features of normal training images form a gallery; test images are
    scored by distance to their k nearest gallery entries.
    """

    def __init__(self, cfg):
        # NOTE(review): super(BaseModel, self) deliberately *skips*
        # BaseModel.__init__ in the MRO and runs its parent's __init__
        # instead. This looks like the classic super(Base, self) typo
        # (normally you want super().__init__()), but BaseModel.__init__'s
        # signature and side effects are not visible here -- confirm
        # intent before changing it.
        super(BaseModel, self).__init__()

        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'

        self.cfg = cfg
        self.init_model()
        self.init_results_list()
        # Directories for the cached feature gallery and visualization output.
        self.embedding_dir_path, self.sample_path = prep_dirs(self.cfg['results_dir'])

    def forward(self, x):
        # SPADE is not trained end-to-end; use train()/test_after() instead.
        raise NotImplementedError

    def hook(self, module, input, output):
        # Forward hook: collect the intermediate feature map of the backbone.
        self.embedding.append(output)

    def init_model(self):
        """Build the frozen feature extractor and register forward hooks on
        layer1-3 (spatial features) and avgpool (global feature)."""
        self.model = wide_resnet50_2(pretrained=True, progress=True)
        self.model.to(self.device)
        self.model.eval()
        self.model.layer1[-1].register_forward_hook(self.hook)
        self.model.layer2[-1].register_forward_hook(self.hook)
        self.model.layer3[-1].register_forward_hook(self.hook)
        self.model.avgpool.register_forward_hook(self.hook)

    def init_results_list(self):
        """Reset all per-run accumulators."""
        self.gt_list_px_lvl = []     # ground-truth pixel masks
        self.pred_list_px_lvl = []   # predicted anomaly maps
        self.gt_list_img_lvl = []    # ground-truth image labels
        self.pred_list_img_lvl = []  # predicted image-level scores
        self.img_path_list = []
        self.img_list = []
        self.embedding = []          # scratch buffer filled by the hooks
        self.train_outputs = OrderedDict([('layer1', []), ('layer2', []), ('layer3', []), ('avgpool', [])])
        self.test_outputs = OrderedDict([('layer1', []), ('layer2', []), ('layer3', []), ('avgpool', [])])

    def train(self, dataloader):
        """Build the gallery of normal-image features.

        Runs every training image through the frozen backbone and stacks the
        hooked feature maps per layer into self.train_outputs, e.g.
        layer1 -> [N, 256, 56, 56], ..., avgpool -> [N, 2048, 1, 1].
        """
        for imgs, _, _, _ in tqdm(dataloader):
            imgs = imgs.to(self.device)
            # BUG FIX: the hook buffer must be cleared before every forward
            # pass. Previously it grew across batches, so zip() below paired
            # the layer names with the *first* batch's features every time.
            self.embedding = []
            with torch.no_grad():
                _ = self.model(imgs)
            for k, v in zip(self.train_outputs.keys(), self.embedding):
                self.train_outputs[k].append(v)

        # Concatenate the per-batch feature maps into one tensor per layer.
        for k, v in self.train_outputs.items():
            self.train_outputs[k] = torch.cat(v, 0)

    def train_after(self, c):
        """Persist the feature gallery for class `c` to disk."""
        with open(os.path.join(self.embedding_dir_path, f'embedding_{c}.pickle'), 'wb') as f:
            pickle.dump(self.train_outputs, f)

    def test_after(self, test_dataloader, c):
        """Score the test set of class `c` against the saved gallery.

        Produces image-level scores (mean distance to the top-k training
        images in avgpool space) and pixel-level anomaly maps (per-pixel
        nearest-neighbor distance over the k neighbors' layer1-3 features),
        then estimates a threshold and writes visualizations.
        """
        # Load the gallery written by train_after(); `with` closes the handle
        # (the original pickle.load(open(...)) leaked it).
        with open(os.path.join(self.embedding_dir_path, f'embedding_{c}.pickle'), 'rb') as f:
            self.train_outputs = pickle.load(f)

        for index, data in enumerate(tqdm(test_dataloader)):
            x, y, mask, path = data
            x = x.to(self.device)

            # BUG FIX: clear the hook buffer before each forward pass so
            # stale features (earlier batches, or the training phase) are
            # not re-paired with the layer names.
            self.embedding = []
            with torch.no_grad():
                _ = self.model(x)
            # Collect the intermediate layer outputs captured by the hooks.
            for k, v in zip(self.test_outputs.keys(), self.embedding):
                self.test_outputs[k].append(v)

            self.img_list.extend(x.cpu().numpy())
            self.gt_list_px_lvl.extend(mask.cpu().numpy())
            self.gt_list_img_lvl.extend(y.cpu().numpy())
            self.img_path_list.extend(path)  # add class

        for k, v in self.test_outputs.items():
            self.test_outputs[k] = torch.cat(v, 0)

        # Image-level score: mean distance to the top-k nearest training
        # images in global (avgpool) feature space.
        dist_matrix = calc_dist_matrix(torch.flatten(self.test_outputs['avgpool'], 1),
                                       torch.flatten(self.train_outputs['avgpool'], 1))
        topk_values, topk_indexes = torch.topk(dist_matrix, k=self.cfg['top_k'], dim=1, largest=False)
        self.pred_list_img_lvl = torch.mean(topk_values, 1).cpu().detach().numpy()

        # Pixel-level anomaly map for each test image.
        for t_idx in tqdm(range(self.test_outputs['avgpool'].shape[0])):
            score_maps = []
            for layer_name in ['layer1', 'layer2', 'layer3']:  # for each layer

                # Gallery: features at all spatial positions of this image's
                # k nearest-neighbor training images.
                topk_feat_map = self.train_outputs[layer_name][topk_indexes[t_idx]]
                test_feat_map = self.test_outputs[layer_name][t_idx:t_idx + 1]
                # [k, C, H, W] -> [k*H*W, C, 1, 1]: one gallery vector per
                # spatial position, broadcastable against the test map.
                feat_gallery = topk_feat_map.transpose(3, 1).flatten(0, 2).unsqueeze(-1).unsqueeze(-1)

                # Distances computed in chunks of 100 gallery vectors to
                # bound memory. BUG FIX: the original iterated
                # range(n // 100), silently dropping the final partial chunk;
                # slicing by stride keeps the remainder.
                dist_matrix_list = []
                for start in range(0, feat_gallery.shape[0], 100):
                    chunk = feat_gallery[start:start + 100]
                    dist_matrix_list.append(torch.pairwise_distance(chunk, test_feat_map))
                dist_matrix = torch.cat(dist_matrix_list, 0)

                # Nearest gallery feature per pixel (k=1), upsampled to the
                # network input size.
                score_map = torch.min(dist_matrix, dim=0)[0]
                score_map = F.interpolate(score_map.unsqueeze(0).unsqueeze(0), size=self.cfg['input_size'],
                                          mode='bilinear', align_corners=False)
                score_maps.append(score_map)

            # Average the three layers' maps, then apply Gaussian smoothing.
            score_map = torch.mean(torch.cat(score_maps, 0), dim=0)
            score_map = gaussian_filter(score_map.cpu().detach().numpy(), sigma=4)
            self.pred_list_px_lvl.extend(score_map)

        self.pred_list_px_lvl = self.min_max_norm(np.array(self.pred_list_px_lvl))
        self.pred_list_img_lvl = self.min_max_norm(np.array(self.pred_list_img_lvl))
        self.gt_list_img_lvl = np.array(self.gt_list_img_lvl)
        self.gt_list_px_lvl = np.array(self.gt_list_px_lvl)

        threshold = self.est_thresh(self.gt_list_px_lvl.flatten().astype('uint8'), self.pred_list_px_lvl.flatten())
        visualize_loc_result(self.img_list, self.img_path_list, self.gt_list_px_lvl, self.pred_list_px_lvl,
                             threshold, self.sample_path, c, self.cfg['n_viz'])

   