import torch
from torch.nn import functional as F
import cv2
import numpy as np
import os
from sklearn.metrics import roc_auc_score
import pytorch_lightning as pl
import pickle
from anomaly.utils import kCenterGreedy
from sklearn.random_projection import SparseRandomProjection
from sklearn.neighbors import NearestNeighbors
from scipy.ndimage import gaussian_filter
from tqdm import tqdm
from anomaly.utils.metrics import cal_confusion_matrix, estimate_thr_recall
from anomaly.backbones import WideResNet50
# from anomaly.datasets import inv_normalize   # TODO
from torchvision import transforms

# Invert the standard ImageNet normalisation for visualisation (inv(x) = (x - m/s) ... i.e. mean=-m/s, std=1/s).
# NOTE(review): the original used 0.255 for the blue channel; the ImageNet std is 0.225
# (a known typo inherited from the reference PatchCore code). Fixed here — this only
# affects the denormalised images saved for inspection. Confirm against the forward
# Normalize used in the dataset pipeline.
inv_normalize = transforms.Normalize(mean=[-0.485/0.229, -0.456/0.224, -0.406/0.225], std=[1/0.229, 1/0.224, 1/0.225])  # in patchcore_train.py


def prep_dirs(root, args):
    """Ensure the embeddings and sample output directories exist.

    The embeddings directory is taken from ``args.weights_dir``; rendered
    anomaly-map samples go under ``<root>/sample``.  Returns the two paths
    as ``(embeddings_path, sample_path)``.
    """
    embeddings_path = args.weights_dir
    sample_path = os.path.join(root, 'sample')
    for directory in (embeddings_path, sample_path):
        os.makedirs(directory, exist_ok=True)
    return embeddings_path, sample_path


def embedding_concat(x, y):
    """Channel-concatenate two feature maps of different spatial resolution.

    Adapted from the PaDiM reference implementation
    (https://github.com/xiahaifeng1995/PaDiM-Anomaly-Detection-Localization-master).

    ``x`` is the higher-resolution map (B, C1, H1, W1) and ``y`` the lower
    one (B, C2, H2, W2); H1 must be an integer multiple s of H2.  Each s x s
    patch of ``x`` is paired with its corresponding single ``y`` position
    (effectively nearest-neighbour upsampling ``y``), giving a
    (B, C1 + C2, H1, W1) result.
    """
    B, C1, H1, W1 = x.size()
    _, C2, H2, W2 = y.size()
    s = int(H1 / H2)
    x = F.unfold(x, kernel_size=s, dilation=1, stride=s)
    x = x.view(B, C1, -1, H2, W2)
    # Allocate on the inputs' device/dtype: the original bare torch.zeros()
    # crashed with a device mismatch as soon as x and y lived on the GPU.
    z = torch.zeros(B, C1 + C2, x.size(2), H2, W2, device=x.device, dtype=x.dtype)
    for i in range(x.size(2)):
        # i indexes the position inside each s x s patch; y is the same for all i.
        z[:, :, i, :, :] = torch.cat((x[:, :, i, :, :], y), 1)
    z = z.view(B, -1, H2 * W2)
    # Non-overlapping fold (stride == kernel_size) reassembles the full-resolution map.
    z = F.fold(z, kernel_size=s, output_size=(H1, W1), stride=s)

    return z

def reshape_embedding(embedding):
    embedding_list = []
    for k in range(embedding.shape[0]):
        for i in range(embedding.shape[2]):
            for j in range(embedding.shape[3]):
                embedding_list.append(embedding[k, :, i, j])
    return embedding_list


def cvt2heatmap(gray):
    """Map a single-channel intensity image (values in [0, 255]) to a BGR jet colormap."""
    return cv2.applyColorMap(np.uint8(gray), cv2.COLORMAP_JET)


# frame = cv2.imread(img)   # original image
# heatmap = cv2.imread(hm)  # heatmap image
# overlay = frame.copy()
# alpha = 0.5  # opacity of the overlay image
# cv2.rectangle(overlay, (0, 0), (frame.shape[1], frame.shape[0]), (255, 0, 0), -1)  # use blue as the heatmap base colour
# cv2.addWeighted(overlay, alpha, frame, 1-alpha, 0, frame)  # blend the base-colour overlay onto the original image
# cv2.addWeighted(heatmap, alpha, frame, 1-alpha, 0, frame)  # blend the heatmap onto the original image
# cv2.imshow('frame', frame)
# cv2.waitKey(0)

def heatmap_on_image(heatmap, image):
    """Alpha-blend ``heatmap`` onto ``image`` and rescale to a uint8 [0, 255] image."""
    if heatmap.shape != image.shape:
        # cv2.resize expects dsize as (width, height); the original passed
        # (rows, cols), which distorted the heatmap for non-square images.
        heatmap = cv2.resize(heatmap, (image.shape[1], image.shape[0]))
    alpha = 0.2  # heatmap opacity
    # Weighted blend rather than a raw sum, which would over-saturate the result.
    out = cv2.addWeighted(np.float32(heatmap), alpha, np.float32(image), 1 - alpha, 0)
    out = out / np.max(out)
    return np.uint8(255 * out)

def min_max_norm(image):
    """Linearly rescale ``image`` values to the range [0, 1].

    A constant input maps to all zeros instead of dividing by zero (the
    original produced NaN/inf there).
    """
    a_min, a_max = image.min(), image.max()
    if a_max == a_min:
        return np.zeros_like(image, dtype=np.float64)
    return (image - a_min) / (a_max - a_min)


class PatchCore(torch.nn.Module):
    """PatchCore anomaly detector (Roth et al.).

    A frozen WideResNet-50 backbone provides mid-level patch features.
    ``train``/``train_after`` build a coreset-subsampled memory bank of
    normal-image patches; ``test_after`` scores test images by
    nearest-neighbour distance to that bank; ``evaluate`` reports
    pixel/image AUROC and thresholded confusion-matrix metrics.
    """

    def __init__(self, args):
        super(PatchCore, self).__init__()

        self.args = args
        self.root = os.path.dirname(self.args.weights_dir)

        self.model = WideResNet50()
        self.model.eval()  # the backbone is used as a fixed feature extractor
        self.features = []        # last forward pass's stage-2/3 feature maps
        self.embedding_list = []  # normal-patch embeddings, extended batch by batch
        self.init_results_list()
        self.embedding_dir_path, self.sample_path = prep_dirs(self.root, self.args)

    def forward(self, x):
        # Keep the feature maps of the last two intermediate stages.
        _, f2, f3 = self.model(x)
        # Reset rather than append: the original accumulated every batch's
        # feature maps here without ever clearing them (unbounded memory growth).
        self.features = [f2, f3]
        return [f2, f3]

    def init_results_list(self):
        """Reset the per-run prediction / ground-truth accumulators."""
        self.gt_list_px_lvl = []    # flattened pixel-level ground truth
        self.pred_list_px_lvl = []  # flattened pixel-level anomaly scores
        self.gt_list_img_lvl = []   # image-level labels
        self.pred_list_img_lvl = [] # image-level anomaly scores
        self.img_path_list = []

    def train_one_batch(self, imgs):
        """Extract locally-smoothed patch embeddings from one batch of normal images."""
        with torch.no_grad():
            features = self.forward(imgs)
            # 3x3 average pooling aggregates each patch with its neighbourhood,
            # e.g. for 224x224 input: (B, 512, 28, 28) and (B, 1024, 14, 14).
            pooling = torch.nn.AvgPool2d(3, 1, 1)
            embeddings = [pooling(feature) for feature in features]
            embedding = embedding_concat(embeddings[0], embeddings[1])
            self.embedding_list.extend(reshape_embedding(embedding.cpu().numpy()))

    def train(self, dataloader):
        """Accumulate patch embeddings over the whole (normal-only) training set.

        NOTE(review): this shadows ``torch.nn.Module.train(mode)``; the
        signature is kept for backward compatibility with existing callers.
        """
        for imgs, _, _, file_name, _ in dataloader:
            self.train_one_batch(imgs)

    def train_after(self):
        """Coreset-subsample the accumulated embeddings and persist the memory bank."""
        total_embeddings = np.array(self.embedding_list)
        # Random projection; 'auto' picks the dimensionality via the
        # Johnson-Lindenstrauss lemma.
        self.randomprojector = SparseRandomProjection(n_components='auto', eps=0.9)
        self.randomprojector.fit(total_embeddings)
        # Greedy k-center coreset subsampling down to the configured ratio.
        selector = kCenterGreedy(total_embeddings, 0, 0)
        selected_idx = selector.select_batch(
            model=self.randomprojector, already_selected=[],
            N=int(total_embeddings.shape[0] * self.args.coreset_sampling_ratio))
        self.embedding_coreset = total_embeddings[selected_idx]

        print('initial embedding size : ', total_embeddings.shape)
        print('final embedding size : ', self.embedding_coreset.shape)
        with open(os.path.join(self.embedding_dir_path, f'embedding_{self.args.category}.pickle'), 'wb') as f:
            pickle.dump(self.embedding_coreset, f)

    def save_anomaly_map(self, anomaly_map, input_img, gt_img, file_name, x_type):
        """Write the input image, heatmap, overlay and ground truth as JPEGs."""
        if anomaly_map.shape != input_img.shape:
            # cv2.resize takes dsize as (width, height); the original passed
            # (rows, cols), distorting the map for non-square images.
            anomaly_map = cv2.resize(anomaly_map, (input_img.shape[1], input_img.shape[0]))
        anomaly_map_norm = min_max_norm(anomaly_map)
        # Single heatmap serves both the standalone map and the overlay
        # (the original computed the identical colormap twice).
        anomaly_map_norm_hm = cvt2heatmap(anomaly_map_norm * 255)
        hm_on_img = heatmap_on_image(anomaly_map_norm_hm, input_img)

        cv2.imwrite(os.path.join(self.sample_path, f'{x_type}_{file_name}.jpg'), input_img)
        cv2.imwrite(os.path.join(self.sample_path, f'{x_type}_{file_name}_amap.jpg'), anomaly_map_norm_hm)
        cv2.imwrite(os.path.join(self.sample_path, f'{x_type}_{file_name}_amap_on_img.jpg'), hm_on_img)
        cv2.imwrite(os.path.join(self.sample_path, f'{x_type}_{file_name}_gt.jpg'), gt_img)

    def test_after(self, test_dataloader):
        """Nearest-neighbour scoring of the test set against the saved memory bank.

        Assumes the dataloader yields batches of size 1 (gt/label/file_name
        are indexed with [0] below) — confirm against the test dataloader.
        """
        with open(os.path.join(self.embedding_dir_path, f'embedding_{self.args.category}.pickle'), 'rb') as f:
            self.embedding_coreset = pickle.load(f)
        # The memory bank is fixed, so fit the neighbour index once instead of
        # rebuilding the ball tree for every test image as the original did.
        nbrs = NearestNeighbors(n_neighbors=self.args.n_neighbors, algorithm='ball_tree',
                                metric='minkowski', p=2).fit(self.embedding_coreset)
        pooling = torch.nn.AvgPool2d(3, 1, 1)
        for x, gt, label, file_name, x_type in tqdm(test_dataloader):
            # Extract the patch embedding; no_grad avoids accumulating
            # autograd graphs during inference (the original kept them alive).
            with torch.no_grad():
                features = self.forward(x)
                embeddings = [pooling(feature) for feature in features]
                embedding_ = embedding_concat(embeddings[0], embeddings[1])
            embedding_test = np.array(reshape_embedding(embedding_.cpu().numpy()))
            score_patches, _ = nbrs.kneighbors(embedding_test)
            # Pixel map size follows the embedding's spatial size (28x28 for
            # 224x224 input) instead of the original hard-coded (28, 28).
            anomaly_map = score_patches[:, 0].reshape(embedding_.shape[-2], embedding_.shape[-1])
            # Image-level score: max patch distance, re-weighted by how
            # distinctive that patch's neighbour distances are (softmax weight).
            N_b = score_patches[np.argmax(score_patches[:, 0])]
            w = 1 - (np.max(np.exp(N_b)) / np.sum(np.exp(N_b)))
            score = w * max(score_patches[:, 0])

            gt_np = gt.cpu().numpy()[0, 0].astype(int)
            anomaly_map_resized = cv2.resize(anomaly_map, (self.args.input_size, self.args.input_size))
            anomaly_map_resized_blur = gaussian_filter(anomaly_map_resized, sigma=4)

            self.gt_list_px_lvl.extend(gt_np.ravel())
            self.pred_list_px_lvl.extend(anomaly_map_resized_blur.ravel())
            self.gt_list_img_lvl.append(label.cpu().numpy()[0])
            self.pred_list_img_lvl.append(score)
            self.img_path_list.extend(file_name)
            # Save visualisations: denormalize back to pixel space first.
            x = inv_normalize(x)
            input_x = cv2.cvtColor(x.permute(0, 2, 3, 1).cpu().numpy()[0] * 255, cv2.COLOR_BGR2RGB)
            self.save_anomaly_map(anomaly_map_resized_blur, input_x, gt_np * 255, file_name[0], x_type[0])

    def evaluate(self):
        """Print AUROC metrics, pick an operating threshold, and append results to disk."""
        print("Total pixel-level auc-roc score :")
        pixel_auc = roc_auc_score(self.gt_list_px_lvl, self.pred_list_px_lvl)
        print(pixel_auc)
        print("Total image-level auc-roc score :")
        img_auc = roc_auc_score(self.gt_list_img_lvl, self.pred_list_img_lvl)
        print(img_auc)
        print('test_epoch_end')
        values = {'pixel_auc': round(pixel_auc, 4), 'img_auc': round(img_auc, 4)}

        # Choose the image-level decision threshold from the recall estimator.
        best_thr = estimate_thr_recall(self.gt_list_img_lvl, self.pred_list_img_lvl)
        print('best_thr is:', best_thr)

        # Confusion-matrix style metrics at that threshold.
        noraml_recall, abnormal_recall, precision, false_p, false_n = \
            cal_confusion_matrix(self.gt_list_img_lvl, self.pred_list_img_lvl,
                                 img_path_list=self.img_path_list, thresh=best_thr)
        # NOTE(review): the 'noraml_recall' key typo is preserved because the
        # results file is appended to and may be parsed downstream.
        values["noraml_recall"] = round(noraml_recall, 4)
        values["abnormal_recall"] = round(abnormal_recall, 4)
        values["precision"] = round(precision, 4)
        values["best_thr"] = round(best_thr, 6)
        values["false_p"] = false_p
        values["false_n"] = false_n

        print('test end')

        with open(os.path.join(self.args.result_dir, 'results_patchcore.txt'), 'a') as f:
            f.write(self.args.category + ' : ' + str(values) + '\n')


