# coding:utf-8
import random
from random import sample
import argparse
import numpy as np
import os
import pickle
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision.models import wide_resnet50_2, resnet18
import datasets.mvtec as mvtec
from tqdm import tqdm
from collections import OrderedDict
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import precision_recall_curve
from sklearn.covariance import LedoitWolf
from scipy.spatial.distance import mahalanobis
from scipy.ndimage import gaussian_filter
from skimage import morphology
from skimage.segmentation import mark_boundaries
import matplotlib.pyplot as plt
import matplotlib
import time
from fungai import mul_mahalanobios

# Suppress numpy divide-by-zero / invalid-value warnings (the F1 computation
# below deliberately divides with a `where=` guard).
np.seterr(divide='ignore', invalid='ignore')

# Run on the first GPU when available, otherwise CPU.
use_cuda = torch.cuda.is_available()
device = torch.device('cuda:0' if use_cuda else 'cpu')


def denormalization(x):
    """Undo ImageNet normalization on a CHW float image and return an HWC uint8 image.

    x: array of shape (3, H, W) normalized with the ImageNet mean/std.
    Returns an (H, W, 3) uint8 array scaled to [0, 255].
    """
    imagenet_mean = np.array([0.485, 0.456, 0.406])
    imagenet_std = np.array([0.229, 0.224, 0.225])
    hwc = np.transpose(x, (1, 2, 0))
    return ((hwc * imagenet_std + imagenet_mean) * 255.).astype(np.uint8)


def embedding_concat(x, y):
    """Concatenate two feature maps of different spatial sizes along the channel dim.

    x: higher-resolution map (B, C1, H1, W1); y: lower-resolution map
    (B, C2, H2, W2) with H1 = s * H2 (integer scale s). Each s x s patch of x is
    paired with the single corresponding y position, producing a
    (B, C1 + C2, H1, W1) tensor whose first C1 channels reproduce x exactly and
    whose remaining C2 channels hold y replicated over each s x s block.
    """
    B, C1, H1, W1 = x.size()
    _, C2, H2, W2 = y.size()
    s = int(H1 / H2)
    # Split x into non-overlapping s x s patches: (B, C1 * s * s, H2 * W2).
    x = F.unfold(x, kernel_size=s, dilation=1, stride=s)
    x = x.view(B, C1, -1, H2, W2)
    # Allocate on x's device/dtype; the original torch.zeros() defaulted to CPU
    # float32, which breaks when the inputs live on CUDA or use another dtype.
    z = torch.zeros(B, C1 + C2, x.size(2), H2, W2, dtype=x.dtype, device=x.device)
    for i in range(x.size(2)):
        # For each intra-patch offset i, pair the x patch values with y.
        z[:, :, i, :, :] = torch.cat((x[:, :, i, :, :], y), 1)

    z = z.view(B, -1, H2 * W2)
    # Patches are non-overlapping, so fold() reassembles without averaging.
    # (Removed leftover debug print() calls that spammed stdout per call.)
    z = F.fold(z, kernel_size=s, output_size=(H1, W1), stride=s)
    return z


def plot_fig(test_img, scores, gts, threshold, save_dir, class_name):
    """Save one anomaly heat-map overlay image per test sample.

    test_img: list of normalized CHW images; scores: (N, H, W) anomaly scores
    in [0, 1]; gts: per-sample ground-truth masks (currently not rendered);
    threshold: score cut-off for the binary anomaly mask. Each figure is
    written to save_dir as '<class_name>_<i>'.
    """
    num = len(scores)
    vmax = scores.max() * 255.
    vmin = scores.min() * 255.
    for i in range(num):
        img = denormalization(test_img[i])
        heat_map = scores[i] * 255
        # Copy before thresholding: the original assigned a view of `scores`
        # here, so the in-place thresholding silently corrupted the caller's
        # scores array.
        mask = scores[i].copy()
        mask[mask > threshold] = 1
        mask[mask <= threshold] = 0
        # Morphological opening removes small speckles from the binary mask.
        kernel = morphology.disk(4)
        mask = morphology.opening(mask, kernel)
        mask *= 255
        # NOTE(review): vis_img is computed but never rendered or saved --
        # kept for parity with the original; confirm whether it should be drawn.
        vis_img = mark_boundaries(img, mask, color=(1, 0, 0), mode='thick')
        # plt.subplots(1, 1) returns (figure, axes); the original indexed the
        # tuple as ax_img[1], which only worked by accident -- unpack explicitly.
        fig, ax = plt.subplots(1, 1)
        norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
        ax.imshow(heat_map, cmap='hot', norm=norm)
        ax.axis('off')  # hide axis ticks
        ax.imshow(img, cmap='gray', interpolation='none')
        ax.imshow(heat_map, cmap='PuRd', alpha=0.7, interpolation='none')
        # transparent + tight bbox: save without the white figure margin
        fig.savefig(os.path.join(save_dir, class_name + '_{}'.format(i)),
                    transparent=True, dpi=100, pad_inches=0, bbox_inches='tight')
        plt.close()


def parse_args(argv=None):
    """Parse command-line options for the PaDiM test script.

    argv: optional list of argument strings; defaults to sys.argv[1:] (the
    original behavior), so existing callers are unaffected while tests and
    programmatic use can pass an explicit list.
    """
    parser = argparse.ArgumentParser('PaDiM')
    # NOTE(review): hard-coded absolute Windows paths as defaults -- consider
    # repo-relative defaults (the commented ones below) for portability.
    # parser.add_argument('--data_path', type=str, default='./dataset/mvtec_anomaly_detection')
    # parser.add_argument('--save_path', type=str, default='./mvtec_result')
    parser.add_argument('--data_path', type=str,
                        default='E:/play/flask/picture_test/Padim/dataset/mvtec_anomaly_detection')
    parser.add_argument('--save_path', type=str,
                        default='E:/play/flask/picture_test/Padim/mvtec_result')
    parser.add_argument('--arch', type=str, choices=['resnet18', 'wide_resnet50_2'], default='resnet18')
    return parser.parse_args(argv)


# ---- Backbone setup (module-level side effects) ----
args = parse_args()
if args.arch == 'resnet18':
    # Weights come from a local checkpoint instead of being downloaded.
    model = resnet18(pretrained=False)
    # model.load_state_dict(torch.load('../pth/resnet18-5c106cde.pth'))
    model.load_state_dict(torch.load('E:/play/flask/picture_test/Padim/pth/resnet18-5c106cde.pth'))

    # t_d: total channels of the concatenated layer1-3 embeddings for
    # resnet18 (64 + 128 + 256 = 448); d: number of randomly kept dimensions
    # (PaDiM-style dimensionality reduction).
    t_d = 448
    d = 100

# NOTE(review): there is no branch for arch == 'wide_resnet50_2' even though
# parse_args accepts it -- selecting it leaves `model`, `t_d`, and `d`
# undefined and the next line raises NameError. Confirm and add the branch.
model.to(device)
model.eval()
# Fix all RNG seeds so the random dimension selection below is reproducible.
random.seed(1024)
torch.manual_seed(1024)
if use_cuda:
    torch.cuda.manual_seed_all(1024)
idx = torch.tensor(sample(range(0, t_d), d))  # randomly pick d of the t_d embedding dims
outputs = []


def hook(module, input, output):
    """Forward hook: collect the tapped layer's feature map into the global `outputs` list."""
    outputs.append(output)

# Tap the last block of layer1-3; each forward pass appends three feature maps
# (in registration order: layer1, layer2, layer3) to `outputs`.
model.layer1[-1].register_forward_hook(hook)
model.layer2[-1].register_forward_hook(hook)
model.layer3[-1].register_forward_hook(hook)
# Directory holding the pickled train-set statistics loaded below.
os.makedirs(os.path.join(args.save_path, 'temp_%s' % args.arch), exist_ok=True)


total_roc_auc = []
total_pixel_roc_auc = []
class_name = 'big'

# Load precomputed train statistics pickled by the training script.
# NOTE(review): the layout is inferred from the indexing below --
# train_outputs[0] holds per-position means and train_outputs[1] holds
# per-position inverse covariances; confirm against the training code.
train_feature_filepath = os.path.join(args.save_path, 'temp_%s' % args.arch, 'train_%s.pkl' % class_name)
with open(train_feature_filepath, 'rb') as f:
    train_outputs = pickle.load(f)
start = time.time()
test_dataset = mvtec.MVTecDataset(args.data_path, class_name=class_name, is_train=False)
test_dataloader = DataLoader(test_dataset, batch_size=32, pin_memory=True)
test_outputs = OrderedDict([('layer1', []), ('layer2', []), ('layer3', [])])
gt_list = []
gt_mask_list = []
test_imgs = []
# Feature extraction: run every test batch through the frozen backbone; the
# forward hooks fill `outputs` with the three tapped feature maps per pass.
for (x, y, mask) in tqdm(test_dataloader, '| feature extraction | test | %s |' % class_name):
    test_imgs.extend(x.cpu().detach().numpy())
    gt_list.extend(y.cpu().detach().numpy())
    gt_mask_list.extend(mask.cpu().detach().numpy())
    # model prediction
    with torch.no_grad():
        _ = model(x.to(device))
        # get intermediate layer outputs
    for k, v in zip(test_outputs.keys(), outputs):
        test_outputs[k].append(v.cpu().detach())
    outputs = []  # reset the hook buffer for the next batch
for k, v in test_outputs.items():
    test_outputs[k] = torch.cat(v, 0)  # concatenate each layer's feature maps across batches
embedding_vectors = test_outputs['layer1']
for layer_name in ['layer2', 'layer3']:
    embedding_vectors = embedding_concat(embedding_vectors, test_outputs[layer_name])  # concatenate the three layers' embeddings
# Keep only the d randomly selected embedding dimensions (same `idx` as training).
embedding_vectors = torch.index_select(embedding_vectors, 1, idx)
B, C, H, W = embedding_vectors.size()
embedding_vectors = embedding_vectors.view(B, C, H * W).numpy()
dist_list = []
means = train_outputs[0].T
con_inv = train_outputs[1].transpose(2, 0, 1)

# Per-image: Mahalanobis distance of every spatial position against the
# train-set Gaussian at that position (vectorized project helper).
scores = []
for i in embedding_vectors:
    j = i.T
    dists = mul_mahalanobios(j, means, con_inv)
    dist_max = np.max(dists)  # image-level score = worst position
    scores.append(dist_max)
    dist_list.append(dists)
print(scores)
end = time.time()
print('测试1时间：{:.4f}s'.format(end - start))
dist_list = np.array(dist_list).reshape(B, H, W)
dist_list = torch.tensor(dist_list)
# Upsample the (B, H, W) distance maps to input-image resolution.
# NOTE(review): relies on the loop variable `x` surviving past the for-loop,
# so `x.size(2)` is the image height of the *last* batch, and passing a single
# int assumes square images -- fragile; confirm inputs are always square.
score_map = F.interpolate(dist_list.unsqueeze(1), size=x.size(2), mode='bilinear', align_corners=False).squeeze(
    1).numpy()
# Smooth each score map to reduce pixel-level noise.
for i in range(score_map.shape[0]):
    score_map[i] = gaussian_filter(score_map[i], sigma=4)

# Min-max normalize scores to [0, 1] over the whole test set.
max_score = score_map.max()
min_score = score_map.min()
scores = (score_map - min_score) / (max_score - min_score)
img_scores = scores.reshape(scores.shape[0], -1).max(axis=1)
gt_list = np.asarray(gt_list)
gt_mask = np.asarray(gt_mask_list)
# Pick the pixel threshold that maximizes F1 on the precision-recall curve
# (divide guarded by `where=` against precision + recall == 0).
precision, recall, thresholds = precision_recall_curve(gt_mask.flatten(), scores.flatten())
a = 2 * precision * recall
b = precision + recall
f1 = np.divide(a, b, out=np.zeros_like(a), where=b != 0)
threshold = thresholds[np.argmax(f1)]
save_dir = args.save_path + '/' + f'big_{args.arch}'
os.makedirs(save_dir, exist_ok=True)
plot_fig(test_imgs, scores, gt_mask_list, threshold, save_dir, class_name)
end = time.time()
print('测试时间：{:.4f}s'.format(end - start))
# for i in embedding_vectors:
#     dists=[]
#     j=i.T
#     print(j.shape)
#     for k in range(len(j)):
#         mean=means[k]
#         con_in=con_inv[k]
#         dist=mahalanobis(j[k],mean,con_in)
#         dists.append(dist)
#     dist_list = np.array(dists)
#     dists=np.max(dist_list)
#     scores.append(dists)
# print(scores)
