import torch
from torch.nn import functional as F
import cv2
import numpy as np
import os
from sklearn.metrics import roc_auc_score,precision_recall_curve
from utils.metrics import cal_confusion_matrix, estimate_thr_recall
import pytorch_lightning as pl
import pickle

from sklearn.random_projection import SparseRandomProjection
from sklearn.neighbors import NearestNeighbors
from scipy.ndimage import gaussian_filter
from tqdm import tqdm
from backbones.resnet import WideResNet50
from torchvision import transforms
from utils.embedding_utils import *
from .base import BaseModel


class PatchCore(BaseModel):
    """PatchCore anomaly detection.

    Training extracts locally-averaged patch embeddings from the last two
    stages of a WideResNet-50 backbone, subsamples them into a coreset
    (memory bank), and at test time scores each patch of a test image by its
    nearest-neighbour distance to the coreset.
    """

    def __init__(self, cfg):
        # NOTE(review): super(BaseModel, self) deliberately(?) skips
        # BaseModel.__init__ and initializes BaseModel's parent instead.
        # Preserved as-is — confirm this is intentional before changing.
        super(BaseModel, self).__init__()

        self.cfg = cfg
        self.features = []          # feature maps from the most recent forward()
        self.embedding_list = []    # accumulated training patch embeddings
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'

        self.init_model()
        self.init_results_list()
        self.embedding_dir_path, self.sample_path = prep_dirs(self.cfg['results_dir'])

    def forward(self, x):
        """Run the backbone and return the last two stage feature maps [f2, f3]."""
        _, f2, f3 = self.model(x)
        # Replace rather than append: the previous version accumulated every
        # batch's feature maps in self.features forever (unbounded memory growth).
        self.features = [f2, f3]
        return [f2, f3]

    def init_model(self):
        """Load the frozen WideResNet-50 feature extractor in eval mode."""
        self.model = WideResNet50()
        self.model.to(self.device)
        self.model.eval()

    def init_results_list(self):
        """Reset all per-run accumulators for ground truth and predictions."""
        self.gt_list_px_lvl = []     # pixel-level ground-truth masks
        self.pred_list_px_lvl = []   # pixel-level anomaly maps
        self.gt_list_img_lvl = []    # image-level labels
        self.pred_list_img_lvl = []  # image-level anomaly scores
        self.img_path_list = []
        self.img_list = []

    def train(self, dataloader):
        """Extract smoothed patch embeddings from every training image.

        Appends the flattened embeddings to ``self.embedding_list``; call
        ``train_after`` afterwards to build and persist the coreset.
        """
        pool = torch.nn.AvgPool2d(3, 1, 1)  # hoisted: was rebuilt per feature map
        for imgs, _, _, _ in dataloader:
            imgs = imgs.to(self.device)
            with torch.no_grad():
                f2, f3 = self.forward(imgs)
                # Local 3x3 average pooling aggregates each patch's neighbourhood,
                # then the two stages are concatenated channel-wise.
                embedding = embedding_concat(pool(f2), pool(f3))
                # .cpu() so this also works when the model runs on CUDA
                # (np.array on a CUDA tensor raises).
                self.embedding_list.extend(reshape_embedding(embedding.cpu().numpy()))

    def train_after(self, c):
        """Subsample the accumulated embeddings into a coreset and persist it.

        ``c`` identifies the class/category and is used in the pickle filename.
        """
        total_embeddings = np.array(self.embedding_list)

        # Sparse random projection (Johnson-Lindenstrauss lemma): cheap
        # dimensionality reduction that approximately preserves pairwise
        # distances — exactly what distance-based coreset selection needs.
        self.randomprojector = SparseRandomProjection(n_components='auto', eps=0.9)
        self.randomprojector.fit(total_embeddings)

        # Greedy k-center coreset subsampling down to coreset_sampling_ratio.
        selector = kCenterGreedy(total_embeddings, 0, 0)
        selected_idx = selector.select_batch(
            model=self.randomprojector,
            already_selected=[],
            N=int(total_embeddings.shape[0] * self.cfg['coreset_sampling_ratio']))
        self.embedding_coreset = total_embeddings[selected_idx]

        with open(os.path.join(self.embedding_dir_path, f'embedding_{c}.pickle'), 'wb') as f:
            pickle.dump(self.embedding_coreset, f)

    def test_after(self, test_dataloader, c):
        """Score a test set by nearest-neighbour search against the coreset.

        For each image: extract patch embeddings, look up the distance of each
        patch to its nearest coreset feature (pixel-level anomaly map), and
        derive a reweighted image-level score as in the PatchCore paper.
        """
        with open(os.path.join(self.embedding_dir_path, f'embedding_{c}.pickle'), 'rb') as f:
            self.embedding_coreset = pickle.load(f)

        # Fit once: the coreset never changes between images (previously the
        # ball tree was rebuilt inside the loop for every test image).
        nbrs = NearestNeighbors(n_neighbors=self.cfg['n_neighbors'],
                                algorithm='ball_tree',
                                metric='minkowski', p=2).fit(self.embedding_coreset)
        pool = torch.nn.AvgPool2d(3, 1, 1)

        for index, data in enumerate(tqdm(test_dataloader)):
            x, y, mask, path = data
            x = x.to(self.device)

            # No gradients are ever used at test time.
            with torch.no_grad():
                f2, f3 = self.forward(x)
                embedding_ = embedding_concat(pool(f2), pool(f3))

            # Spatial size of the patch grid (was hard-coded to 28x28, which
            # only held for one specific input size).
            grid_h, grid_w = embedding_.shape[-2], embedding_.shape[-1]
            embedding_test = np.array(reshape_embedding(embedding_.cpu().numpy()))

            # kneighbors returns distances sorted ascending, so column 0 is the
            # distance from each patch to its closest coreset feature.
            score_patches, _ = nbrs.kneighbors(embedding_test)

            # Pixel-level map: nearest-neighbour distance per patch.
            anomaly_map = score_patches[:, 0].reshape((grid_h, grid_w))

            # Image-level reweighting (PatchCore paper): soften the max patch
            # score by how distinctive its neighbourhood is. Shift N_b by its
            # max before exp — softmax is shift-invariant, and this prevents
            # overflow (inf/NaN) for large distances.
            N_b = score_patches[np.argmax(score_patches[:, 0])]
            N_b = N_b - N_b.max()
            weight = 1 - (np.max(np.exp(N_b)) / np.sum(np.exp(N_b)))
            score = weight * max(score_patches[:, 0])  # Image-level score

            anomaly_map_resized = cv2.resize(anomaly_map, (self.cfg['input_size'], self.cfg['input_size']))
            anomaly_map_resized_blur = gaussian_filter(anomaly_map_resized, sigma=4)

            self.gt_list_px_lvl.extend(mask.cpu().numpy())
            self.gt_list_img_lvl.extend(y.cpu().numpy())
            self.img_path_list.extend(path)
            self.pred_list_px_lvl.extend([anomaly_map_resized_blur])
            self.pred_list_img_lvl.extend([score])
            self.img_list.extend(x.cpu().numpy())

        # Normalize predictions to [0, 1] before thresholding/visualization.
        self.pred_list_px_lvl = self.min_max_norm(np.array(self.pred_list_px_lvl))
        self.pred_list_img_lvl = self.min_max_norm(np.array(self.pred_list_img_lvl))
        self.gt_list_img_lvl = np.array(self.gt_list_img_lvl)
        self.gt_list_px_lvl = np.array(self.gt_list_px_lvl)

        threshold = self.est_thresh(self.gt_list_px_lvl.flatten().astype('uint8'), self.pred_list_px_lvl.flatten())
        print('pixel-level best_thr is', threshold)
        visualize_loc_result(self.img_list, self.img_path_list, self.gt_list_px_lvl, self.pred_list_px_lvl, threshold, self.sample_path, c, self.cfg['n_viz'])

   