from torch import nn
from sklearn.metrics import roc_curve, auc,roc_auc_score
from scipy.ndimage.filters import gaussian_filter
import numpy as np
import torch
from torch.autograd import Variable
from copy import deepcopy
from torch.nn import ReLU
import yaml
import cv2
from tqdm import tqdm
# Module-wide compute device: CUDA when available, otherwise CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

def sigmoid(x):
    """Numerically stable element-wise sigmoid (works on scalars and arrays).

    np.where evaluates BOTH branches, so np.exp overflows for large |x|
    and spams RuntimeWarnings even though the selected value is correct;
    silence only that overflow while keeping identical results.
    """
    with np.errstate(over='ignore'):
        return np.where(x >= 0, 1 / (1 + np.exp(-x)), np.exp(x) / (1 + np.exp(x)))

# def localization_test(model, vgg, test_dataloader, config):
#     localization_method = config['localization_method']
#     if localization_method == 'gradients':
#         grad = gradients_localization(model, vgg, test_dataloader, config)
#     if localization_method == 'smooth_grad':
#         grad,gt = smooth_grad_localization(model, vgg, test_dataloader, config)
#     if localization_method == 'gbp':
#         grad,gt = gbp_localization(model, vgg, test_dataloader, config)
#     return compute_localization_auc(grad,gt)
def localization_test(model, vgg, test_dataloader, config):
    """Dispatch to the localization method named in config['localization_method'].

    Returns (grad, gt): anomaly maps and ground-truth masks (gt is None for
    the 'gradients' method, which produces no masks).

    Fixes the original's NameError: for 'gradients', `gt` was never assigned
    before `return grad, gt`, and an unknown method left both undefined.
    """
    localization_method = config['localization_method']
    if localization_method == 'gradients':
        grad = gradients_localization(model, vgg, test_dataloader, config)
        gt = None  # gradients_localization returns maps only
    elif localization_method == 'smooth_grad':
        grad, gt = smooth_grad_localization(model, vgg, test_dataloader, config)
    elif localization_method == 'gbp':
        grad, gt = gbp_localization(model, vgg, test_dataloader, config)
    else:
        raise ValueError(f"unknown localization_method: {localization_method!r}")
    return grad, gt

def grad_calc(inputs, model, vgg, config):
    """Compute per-sample input gradients of the distillation loss.

    For each sample, backpropagates the combined (1 - cosine) + lamda * MSE
    loss between student (`model`) and teacher (`vgg`) activations back to
    the input and collects the input gradient.

    Returns a CPU tensor of the same shape as `inputs`.
    """
    # Fixed: hard-coded .cuda() crashed on CPU-only machines; use the
    # module-level device instead.
    inputs = inputs.to(device)
    inputs.requires_grad = True
    temp = torch.zeros(inputs.shape)
    lamda = config['lamda']  # weight of the MSE term
    criterion = nn.MSELoss()
    similarity_loss = torch.nn.CosineSimilarity()

    for i in range(inputs.shape[0]):
        output_pred = model.forward(inputs[i].unsqueeze(0), target_layer=14)
        output_real = vgg(inputs[i].unsqueeze(0))
        # Indices 6/9/12 select the three intermediate activation maps compared.
        y_pred_1, y_pred_2, y_pred_3 = output_pred[6], output_pred[9], output_pred[12]
        y_1, y_2, y_3 = output_real[6], output_real[9], output_real[12]
        abs_loss_1 = criterion(y_pred_1, y_1)
        loss_1 = torch.mean(1 - similarity_loss(y_pred_1.view(y_pred_1.shape[0], -1), y_1.view(y_1.shape[0], -1)))
        abs_loss_2 = criterion(y_pred_2, y_2)
        loss_2 = torch.mean(1 - similarity_loss(y_pred_2.view(y_pred_2.shape[0], -1), y_2.view(y_2.shape[0], -1)))
        abs_loss_3 = criterion(y_pred_3, y_3)
        loss_3 = torch.mean(1 - similarity_loss(y_pred_3.view(y_pred_3.shape[0], -1), y_3.view(y_3.shape[0], -1)))
        total_loss = loss_1 + loss_2 + loss_3 + lamda * (abs_loss_1 + abs_loss_2 + abs_loss_3)
        model.zero_grad()
        total_loss.backward()

        # Only slice i receives gradient from this sample's loss, so
        # accumulation across iterations does not cross-contaminate slices.
        temp[i] = inputs.grad[i]

    return temp


def gradients_localization(model, vgg, test_dataloader, config):
    """Vanilla-backprop anomaly localization over the test set.

    NOTE(review): this definition is shadowed by a later definition of the
    same name further down the file, so this copy is never the one called.
    Also note that `temp` is rebuilt on every batch, so only the LAST
    batch's maps are returned.
    """
    model.eval()
    print("Vanilla Backpropagation:")
    temp = None
    for data in test_dataloader:
        X,GT, Y,_ = data   # TODO: confirm dataloader tuple format (image, gt mask, label, path?)

        grad = grad_calc(X, model, vgg, config)
        temp = np.zeros((grad.shape[0], grad.shape[2], grad.shape[3]))
        for i in range(grad.shape[0]):
            # collapse channels, then smooth the map
            grad_temp = convert_to_grayscale(grad[i].cpu().numpy())
            grad_temp = grad_temp.squeeze(0)
            grad_temp = gaussian_filter(grad_temp, sigma=4)
            temp[i] = grad_temp

    return temp


class VanillaSaliency():
    """Vanilla-backprop saliency for the student/teacher (KDAD) pair.

    Backpropagates the combined (1 - cosine) + lamda * MSE distillation loss
    between `model` (student) and `vgg` (teacher) activations to the input
    image, and returns the input gradient as a saliency map.
    """

    def __init__(self, model, vgg, device, config):
        self.model = model      # student network
        self.vgg = vgg          # teacher network
        self.device = device
        self.config = config    # expects key 'lamda' (MSE term weight)
        self.model.eval()

    def generate_saliency(self, data, make_single_channel=True):
        """Return the input-gradient saliency for `data` as a numpy array.

        data: input image tensor — presumably (1, C, H, W); TODO confirm.
        make_single_channel: if True, collapse channels via
        convert_to_grayscale to shape (1, H, W); otherwise return the raw
        per-channel gradient.
        """
        data_var_sal = Variable(data).to(self.device)
        self.model.zero_grad()
        if data_var_sal.grad is not None:
            data_var_sal.grad.data.zero_()  # clear stale gradients from earlier calls
        data_var_sal.requires_grad_(True)

        lamda = self.config['lamda']
        criterion = nn.MSELoss()
        similarity_loss = torch.nn.CosineSimilarity()

        # Student/teacher forward passes; indices 6/9/12 select the three
        # intermediate activation maps being compared.
        output_pred = self.model.forward(data_var_sal)
        output_real = self.vgg(data_var_sal)
        y_pred_1, y_pred_2, y_pred_3 = output_pred[6], output_pred[9], output_pred[12]
        y_1, y_2, y_3 = output_real[6], output_real[9], output_real[12]

        # Per-layer loss: (1 - cosine similarity) + lamda * MSE.
        abs_loss_1 = criterion(y_pred_1, y_1)
        loss_1 = torch.mean(1 - similarity_loss(y_pred_1.view(y_pred_1.shape[0], -1), y_1.view(y_1.shape[0], -1)))
        abs_loss_2 = criterion(y_pred_2, y_2)
        loss_2 = torch.mean(1 - similarity_loss(y_pred_2.view(y_pred_2.shape[0], -1), y_2.view(y_2.shape[0], -1)))
        abs_loss_3 = criterion(y_pred_3, y_3)
        loss_3 = torch.mean(1 - similarity_loss(y_pred_3.view(y_pred_3.shape[0], -1), y_3.view(y_3.shape[0], -1)))
        total_loss = loss_1 + loss_2 + loss_3 + lamda * (abs_loss_1 + abs_loss_2 + abs_loss_3)
        self.model.zero_grad()
        total_loss.backward()
        grad = data_var_sal.grad.data.detach().cpu()

        if make_single_channel:
            # NOTE(review): grad is already detached and on CPU here; the
            # extra .detach().cpu() is redundant but harmless.
            grad = np.asarray(grad.detach().cpu().squeeze(0))
            # grad = max_regarding_to_abs(np.max(grad, axis=0), np.min(grad, axis=0))
            # grad = np.expand_dims(grad, axis=0)
            grad = convert_to_grayscale(grad)
        else:
            grad = np.asarray(grad)

        return grad


def generate_smooth_grad(data, param_n, param_sigma_multiplier, vbp, single_channel=True):
    """SmoothGrad: average vanilla saliency maps over `param_n` noisy copies of `data`.

    `vbp` must expose generate_saliency(image, single_channel) returning an
    array-like saliency map; the returned average is a numpy array.
    """
    sigma = param_sigma_multiplier / (torch.max(data) - torch.min(data)).item()
    accumulated = None

    for _ in range(param_n):
        perturbation = Variable(data.data.new(data.size()).normal_(0, sigma ** 2))
        saliency = vbp.generate_saliency(data + perturbation, single_channel)
        if not isinstance(saliency, np.ndarray):
            saliency = saliency.detach().cpu().numpy()
        accumulated = saliency if accumulated is None else accumulated + saliency

    return accumulated / param_n


class IntegratedGradients():
    """Integrated-gradients saliency built on top of VanillaSaliency.

    Fixes the original bug where generate_gradients constructed
    VanillaSaliency without the required `config` argument (TypeError).
    `config` is accepted here with a backward-compatible default.
    """

    def __init__(self, model, vgg, device, config=None):
        self.model = model
        self.vgg = vgg
        self.gradients = None
        self.device = device
        self.config = config  # forwarded to VanillaSaliency (expects 'lamda')
        # Put model in evaluation mode
        self.model.eval()

    def generate_images_on_linear_path(self, input_image, steps):
        """Return `steps + 1` images scaled linearly from 0 to input_image."""
        step_list = np.arange(steps + 1) / steps
        return [input_image * step for step in step_list]

    def generate_gradients(self, input_image, make_single_channel=True):
        """Vanilla input-gradient saliency for one (scaled) image, as numpy."""
        vanillaSaliency = VanillaSaliency(self.model, self.vgg, self.device, self.config)
        saliency = vanillaSaliency.generate_saliency(input_image, make_single_channel)
        if not isinstance(saliency, np.ndarray):
            saliency = saliency.detach().cpu().numpy()
        return saliency

    def generate_integrated_gradients(self, input_image, steps, make_single_channel=True):
        """Average gradients along the linear path, then scale by the input."""
        xbar_list = self.generate_images_on_linear_path(input_image, steps)
        integrated_grads = None
        for xbar_image in xbar_list:
            single_integrated_grad = self.generate_gradients(xbar_image, False)
            if integrated_grads is None:
                integrated_grads = deepcopy(single_integrated_grad)
            else:
                integrated_grads = (integrated_grads + single_integrated_grad)
        integrated_grads /= steps
        saliency = integrated_grads[0]
        img = input_image.detach().cpu().numpy().squeeze(0)
        saliency = np.asarray(saliency) * img
        if make_single_channel:
            # collapse channels by keeping the value with the largest magnitude
            saliency = max_regarding_to_abs(np.max(saliency, axis=0), np.min(saliency, axis=0))

        return saliency


def generate_integrad_saliency_maps(model, vgg, preprocessed_image, device, steps=100, make_single_channel=True):
    """Convenience wrapper: build an IntegratedGradients explainer and run it.

    Optionally collapses the result to a single grayscale channel.
    """
    explainer = IntegratedGradients(model, vgg, device)
    saliency = explainer.generate_integrated_gradients(preprocessed_image, steps, make_single_channel)
    if make_single_channel:
        saliency = convert_to_grayscale(saliency)
    return saliency


class GuidedBackprop():
    """Guided-backpropagation saliency.

    Hooks every ReLU in `model` so that, during the backward pass, only
    positive gradients flowing through positively-activated units survive.
    """

    def __init__(self, model, vgg, device):
        self.model = model
        self.vgg = vgg
        self.gradients = None
        self.forward_relu_outputs = []  # stack of ReLU outputs captured on the forward pass
        self.device = device
        self.hooks = []  # hook handles; removed in generate_gradients
        self.model.eval()
        self.update_relus()

    def update_relus(self):
        """Register forward/backward hooks on every ReLU module of the model."""

        def relu_backward_hook_function(module, grad_in, grad_out):
            # Pop the matching forward output (backward hooks fire in reverse order).
            corresponding_forward_output = self.forward_relu_outputs[-1]

            # Binarize the forward activation: 1 where it was positive.
            # NOTE(review): this mutates the stored forward tensor in place.
            corresponding_forward_output[corresponding_forward_output > 0] = 1

            # Guided-backprop rule: keep gradient only where BOTH the forward
            # activation and the incoming gradient are positive.
            modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)
            del self.forward_relu_outputs[-1]  # Remove last forward output
            return (modified_grad_out,)

        def relu_forward_hook_function(module, ten_in, ten_out):
            self.forward_relu_outputs.append(ten_out)

        # Loop through layers, hook up ReLUs
        # NOTE(review): register_backward_hook is deprecated in recent PyTorch
        # in favor of register_full_backward_hook — confirm against the pinned
        # torch version before upgrading.
        for module in self.model.modules():
            if isinstance(module, ReLU):
                self.hooks.append(module.register_backward_hook(relu_backward_hook_function))
                self.hooks.append(module.register_forward_hook(relu_forward_hook_function))

    def generate_gradients(self, input_image, config, make_single_channel=True):
        """Run vanilla saliency under the guided-ReLU hooks, then remove them."""
        vanillaSaliency = VanillaSaliency(self.model, self.vgg, self.device, config=config)
        sal = vanillaSaliency.generate_saliency(input_image, make_single_channel)
        if not isinstance(sal, np.ndarray):
            sal = sal.detach().cpu().numpy()
        for hook in self.hooks:
            hook.remove()
        return sal

def gradients_localization(model, vgg, test_dataloader, config):
    """Vanilla-backprop anomaly localization over the whole test set.

    Returns a (N, H, W) array of smoothed grayscale gradient maps.

    Fixed: the original rebuilt `temp` on every batch, so only the LAST
    batch's maps were returned; maps are now concatenated across batches.
    """
    model.eval()
    print("Vanilla Backpropagation:")
    batch_maps = []
    for data in test_dataloader:
        X, GT, Y, _ = data  # TODO: confirm dataloader tuple format (image, gt mask, label, path?)

        grad = grad_calc(X, model, vgg, config)
        temp = np.zeros((grad.shape[0], grad.shape[2], grad.shape[3]))
        for i in range(grad.shape[0]):
            # collapse channels, then smooth the map
            grad_temp = convert_to_grayscale(grad[i].cpu().numpy())
            grad_temp = grad_temp.squeeze(0)
            grad_temp = gaussian_filter(grad_temp, sigma=4)
            temp[i] = grad_temp
        batch_maps.append(temp)

    return np.concatenate(batch_maps, axis=0) if batch_maps else None


def gbp_localization(model, vgg, test_dataloader, cfg):
    """Guided-backprop anomaly localization over the whole test set.

    Returns (grads, GT): grads is (N, size, size); GT is (N, size, size, 1).

    Fixes vs. original:
    - ground-truth masks are now collected from EVERY batch (the original
      used only the last batch's `mask`, mismatching `grads`);
    - uses the module-level `device` instead of hard-coded 'cuda:0';
    - numpy .min()/.max() instead of Python min()/max() on flattened arrays.
    """
    model.eval()
    print("GBP Method:")

    grads = []
    gts = []
    size = cfg['input_size']
    for data in tqdm(test_dataloader):
        x, y, mask, path = data
        for x_ in x:
            sample = x_.view(1, 3, size, size)

            # A fresh GuidedBackprop per sample re-registers and removes hooks.
            GBP = GuidedBackprop(model, vgg, device)
            saliency = abs(GBP.generate_gradients(sample, cfg))
            # min-max normalize to [0, 1]
            saliency = (saliency - saliency.min()) / (saliency.max() - saliency.min())
            saliency = gaussian_filter(saliency, sigma=4)
            grads.extend(saliency)
        gts.append(mask.cpu().numpy())

    grads = np.array(grads).reshape(-1, size, size)
    GT = np.concatenate(gts, axis=0).reshape(-1, 1, size, size)
    GT = np.transpose(GT, (0, 2, 3, 1))  # to channels-last (N, H, W, 1)
    return grads, GT


def smooth_grad_localization(model, vgg, test_dataloader, config):
    """SmoothGrad anomaly localization over the whole test set.

    Returns (grad1, GT): grad1 is (N, size, size) float32; GT is
    (N, size, size, 1).

    Fixes vs. original:
    - `grad1` was reallocated per batch while the write index kept growing,
      causing an IndexError on the second batch; results are now accumulated
      in lists across all batches;
    - ground-truth masks are collected from EVERY batch, not just the last;
    - uses the module-level `device` instead of hard-coded 'cuda:0';
    - image size comes from config['input_size'] (default 128, the old
      hard-coded value) for consistency with gbp_localization.
    """
    model.eval()
    print("Smooth Grad Method:")

    size = config.get('input_size', 128)
    maps = []
    gts = []

    for data in test_dataloader:
        X, GT, Y, _ = data  # TODO: confirm dataloader tuple format
        for x in X:
            sample = x.view(1, 3, size, size)

            vbp = VanillaSaliency(model, vgg, device, config)

            # 50 noisy samples, noise scale 0.05 (SmoothGrad defaults here)
            saliency = abs(generate_smooth_grad(sample, 50, 0.05, vbp))
            # min-max normalize to [0, 1]
            saliency = (saliency - saliency.min()) / (saliency.max() - saliency.min())
            saliency = gaussian_filter(saliency, sigma=4)
            maps.append(saliency)
        gts.append(GT.cpu().numpy())

    grad1 = np.array(maps, dtype=np.float32).reshape(-1, size, size)
    GT = np.concatenate(gts, axis=0).reshape(-1, 1, size, size)
    GT = np.transpose(GT, (0, 2, 3, 1))  # to channels-last (N, H, W, 1)
    return grad1, GT


def compute_localization_auc(grad, x_ground):
    """Pixel-level ROC AUC of anomaly maps against ground-truth masks.

    grad: (N, H, W) anomaly maps in [0, 1].
    x_ground: (N, H, W, C) ground-truth masks; channels are averaged.

    Side effects: saves each map as a PNG and saves/shows the ROC curve
    (paths are hard-coded — TODO: move both into the config file).
    """
    from matplotlib import pyplot as plt
    import os

    tpr = []
    fpr = []

    # collapse channels-last masks to (N, H, W)
    x_ground_comp = np.mean(x_ground, axis=3)

    # sweep thresholds 0.000 .. 0.999
    thresholds = [0.001 * i for i in range(1000)]

    save_grad_dir = '/workspace/_Weights/anomaly_lab/kdad/sample'  # TODO: move to config
    os.makedirs(save_grad_dir, exist_ok=True)  # race-free vs. exists()+makedirs()

    for i, g in enumerate(grad):
        plt.imsave(os.path.join(save_grad_dir, f'{i}.png'), g)

    for threshold in thresholds:
        # binarize, then clean up with a morphological opening
        grad_t = 1.0 * (grad >= threshold)
        grad_t = morphological_process(grad_t)

        tp_map = np.multiply(grad_t, x_ground_comp)
        tpr.append(np.sum(tp_map) / np.sum(x_ground_comp))

        inv_x_ground = 1 - x_ground_comp
        fp_map = np.multiply(grad_t, inv_x_ground)
        tn_map = np.multiply(1 - grad_t, 1 - x_ground_comp)
        fpr.append(np.sum(fp_map) / (np.sum(fp_map) + np.sum(tn_map)))

    plt.figure()
    lw = 2
    plt.plot(fpr, tpr, color='darkorange',
             lw=lw, label='ROC curve (area = %0.2f)' % auc(fpr, tpr))
    plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic example')
    plt.legend(loc="lower right")
    # Fixed: savefig must run BEFORE show() — show() clears the current
    # figure, so the original order wrote a blank image.
    plt.savefig('/home/ops/lnq/anomaly_lab/kdad/capsule_reslut.png')  # TODO: move to config
    plt.show()

    return auc(fpr, tpr)

def get_config(config):
    """Load the YAML file at path `config` and return its parsed contents.

    Uses safe_load: yaml.load without an explicit Loader is deprecated in
    PyYAML >= 5.1 (raises TypeError on 6.x) and unsafe on untrusted input.
    """
    with open(config, 'r') as stream:
        return yaml.safe_load(stream)


def convert_to_grayscale(im_as_arr):
    """Collapse a (C, H, W) array to a normalized (1, H, W) grayscale map.

    Sums absolute values over the channel axis, then rescales to [0, 1]
    using the 99th percentile as the upper bound (robust to outliers).

    Fixed: guard against division by zero when the map is constant
    (im_max == im_min), which previously produced NaN/inf.
    """
    grayscale_im = np.sum(np.abs(im_as_arr), axis=0)
    im_max = np.percentile(grayscale_im, 99)
    im_min = np.min(grayscale_im)
    denom = im_max - im_min
    if denom == 0:
        denom = 1.0  # constant map: return all zeros instead of NaN
    grayscale_im = np.clip((grayscale_im - im_min) / denom, 0, 1)
    grayscale_im = np.expand_dims(grayscale_im, axis=0)
    return grayscale_im


# opening morphological process for localization
def morphological_process(x):
    """Apply a morphological opening (3x3 ellipse kernel) to each map in x.

    x: stack of 2-D binary maps, shape (N, H, W); returns a uint8 array of
    the same shape with small isolated activations removed.
    """
    ellipse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)).astype(np.uint8)
    binary_map = x.astype(np.uint8)
    opened = [cv2.morphologyEx(frame, cv2.MORPH_OPEN, ellipse) for frame in binary_map]
    return np.stack(opened, axis=0)


def max_regarding_to_abs(a, b):
    """Element-wise pick between a and b by larger absolute value.

    Ties (|a| == |b|) resolve to a, matching the original loop. Returns a
    float array like the original's np.zeros-based accumulator.

    Replaced the O(H*W) Python double loop with a vectorized np.where.
    """
    a = np.asarray(a)
    b = np.asarray(b)
    return np.where(np.abs(a) >= np.abs(b), a, b).astype(float)
