import numpy as np
import torch
from torch.autograd import Function
import torch.nn.functional as F
from torchvision.transforms import Normalize
from torch.autograd import Variable as V
import random


class GuidedBackpropReLU(Function):
    """ReLU with guided-backpropagation gradients.

    Forward is a plain ReLU.  Backward lets a gradient through only where
    BOTH the forward input and the incoming gradient are positive.
    """

    @staticmethod
    def forward(ctx, input_img):
        # Keep strictly positive activations, zero the rest (plain ReLU).
        keep = (input_img > 0).type_as(input_img)
        result = input_img * keep
        ctx.save_for_backward(input_img, result)
        return result

    @staticmethod
    def backward(ctx, grad_output):
        input_img, _ = ctx.saved_tensors
        # Guided-backprop rule: gate on positive input AND positive gradient.
        input_gate = (input_img > 0).type_as(grad_output)
        grad_gate = (grad_output > 0).type_as(grad_output)
        return grad_output * input_gate * grad_gate

class GothroughReLU(Function):
    """ReLU whose backward pass only propagates gradients whose magnitude
    reaches a per-example quantile threshold AND whose forward input was
    positive.  Forward is a plain ReLU."""

    # Quantile level(s) in [0, 1]; overwritten by GothroughReLUModel
    # before the function is used.
    threshold = [0.7]
    @staticmethod
    def forward(self, input_img):
        # addcmul(zeros, a, b) == a * b: zero out non-positive activations.
        positive_mask = (input_img > 0).type_as(input_img)
        output = torch.addcmul(torch.zeros(input_img.size()).type_as(
            input_img), input_img, positive_mask)
        self.save_for_backward(input_img, output)
        return output

    @staticmethod
    def backward(self, grad_output):
        input_img, output = self.saved_tensors
        quantile = []
        # NOTE(review): the loop steps by len(threshold), while each
        # torch.quantile call below returns len(threshold) values; the cat
        # further down therefore yields one threshold per batch element only
        # when these two line up — confirm this is intended for
        # len(threshold) > 1.
        for i in range(0, input_img.shape[0], len(GothroughReLU.threshold)):
            example = grad_output[i]
            # neg + pos quantile
            quantile.append(torch.quantile(torch.abs(example), torch.tensor(GothroughReLU.threshold).type_as(input_img)))
            # pos quantile
            # quantile.append(torch.quantile(example[example > 0], GothroughReLU.threshold))
        
        # Broadcast one threshold per example over the remaining dims;
        # assumes 4-D (N, C, H, W) activations — TODO confirm.
        mean_v = torch.cat(quantile)[:, None, None, None].type_as(input_img)
        # neg + pos quantile
        mean_mask = (torch.abs(grad_output) >= mean_v).type_as(input_img) * (input_img > 0).type_as(input_img)
        # pos quantile
        # mean_mask = (grad_output >= mean_v).type_as(input_img) * (input_img > 0).type_as(input_img)

        # addcmul(zeros, a, b) == a * b: apply the combined mask.
        grad_input = torch.addcmul(torch.zeros(input_img.size()).type_as(input_img), grad_output, mean_mask)
        return grad_input

class DeconvReLU(Function):
    """ReLU with 'deconvnet' gradients.

    Forward is a plain ReLU.  Backward keeps only the positive part of the
    incoming gradient and ignores the forward input entirely.
    """

    @staticmethod
    def forward(ctx, input_img):
        # Plain ReLU: multiply by the positivity mask of the input.
        keep = (input_img > 0).type_as(input_img)
        result = input_img * keep
        ctx.save_for_backward(input_img, result)
        return result

    @staticmethod
    def backward(ctx, grad_output):
        # Deconvnet rule: pass through positive gradients only.
        grad_gate = (grad_output > 0).type_as(grad_output)
        return grad_output * grad_gate

class GothroughReLUModel:
    """Saliency wrapper that swaps every ReLU in `model` for GothroughReLU
    and backprops a target class score to the input image.

    :param model: torch module whose ReLU activations are replaced in place.
    :param threshold: quantile level(s) for GothroughReLU.backward;
        defaults to [0.7].
    :param use_cuda: if True, move inputs to the GPU before the forward pass.
    """

    def __init__(self, model, threshold=None, use_cuda=False):
        self.model = model
        # BUG FIX: __call__ reads self.cuda, but it was never assigned,
        # so every call raised AttributeError.
        self.cuda = use_cuda
        # Avoid the mutable-default-argument pitfall (was threshold=[0.7]).
        resolved = list(threshold) if threshold is not None else [0.7]

        def recursive_relu_apply(module_top):
            # Depth-first walk over submodules; swap each ReLU for the
            # custom autograd function.
            for idx, module in module_top._modules.items():
                recursive_relu_apply(module)
                if module.__class__.__name__ == 'ReLU':
                    GothroughReLU.threshold = resolved
                    module_top._modules[idx] = GothroughReLU.apply

        # replace ReLU with GothroughReLU
        recursive_relu_apply(self.model)

    def forward(self, input_img):
        return self.model(input_img)

    def __call__(self, input_img, target_category=None):
        """Return d(score[target_category]) / d(input) as an HWC numpy array.

        :param input_img: image batch of size 1 (NCHW assumed — the result
            is taken from batch element 0 and transposed CHW->HWC).
        :param target_category: class index; defaults to the top prediction.
        """
        if self.cuda:
            input_img = input_img.cuda()

        input_img = input_img.requires_grad_(True)

        output = self.forward(input_img)

        # Default to the highest-scoring class.
        if target_category is None:
            target_category = np.argmax(output.cpu().data.numpy())

        # One-hot selector so backward sees a scalar class score.
        one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
        one_hot[0][target_category] = 1
        one_hot = torch.from_numpy(one_hot)
        if self.cuda:
            one_hot = one_hot.cuda()

        one_hot = torch.sum(one_hot * output)
        one_hot.backward()

        output = input_img.grad.cpu().data.numpy()
        output = output[0, :, :, :]
        # CHW -> HWC for display.
        output = output.transpose((1, 2, 0))
        return output

class DeconvReLUModel:
    """'Deconvnet' saliency wrapper: swaps every ReLU in `model` for
    DeconvReLU and backprops a target class score to the input image.

    :param model: torch module whose ReLU activations are replaced in place.
    :param use_cuda: if True, move inputs to the GPU before the forward pass.
    """

    def __init__(self, model, use_cuda=False):
        self.model = model
        # BUG FIX: __call__ reads self.cuda, but it was never assigned,
        # so every call raised AttributeError.
        self.cuda = use_cuda

        def recursive_relu_apply(module_top):
            # Depth-first walk over submodules; swap each ReLU for the
            # custom autograd function.
            for idx, module in module_top._modules.items():
                recursive_relu_apply(module)
                if module.__class__.__name__ == 'ReLU':
                    module_top._modules[idx] = DeconvReLU.apply

        # replace ReLU with DeconvReLU
        recursive_relu_apply(self.model)

    def forward(self, input_img):
        return self.model(input_img)

    def __call__(self, input_img, target_category=None):
        """Return d(score[target_category]) / d(input) as an HWC numpy array.

        :param input_img: image batch of size 1 (NCHW assumed — the result
            is taken from batch element 0 and transposed CHW->HWC).
        :param target_category: class index; defaults to the top prediction.
        """
        if self.cuda:
            input_img = input_img.cuda()

        input_img = input_img.requires_grad_(True)

        output = self.forward(input_img)

        # Default to the highest-scoring class.
        if target_category is None:
            target_category = np.argmax(output.cpu().data.numpy())

        # One-hot selector so backward sees a scalar class score.
        one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
        one_hot[0][target_category] = 1
        one_hot = torch.from_numpy(one_hot)
        if self.cuda:
            one_hot = one_hot.cuda()

        one_hot = torch.sum(one_hot * output)
        one_hot.backward()

        output = input_img.grad.cpu().data.numpy()
        output = output[0, :, :, :]
        # CHW -> HWC for display.
        output = output.transpose((1, 2, 0))
        return output

    def clip_by_tensor(self, t, t_min, t_max):
        """Element-wise clip of `t` into [t_min, t_max].

        :param t: tensor to clip
        :param t_min: lower bound (tensor broadcastable against t)
        :param t_max: upper bound (tensor broadcastable against t)
        :return: clipped tensor
        """
        result = (t >= t_min).float() * t + (t < t_min).float() * t_min
        result = (result <= t_max).float() * result + \
            (result > t_max).float() * t_max
        return result

class GuidedBackpropReLUModel:
    """Guided-backprop saliency wrapper: swaps every ReLU in `model` for
    GuidedBackpropReLU and backprops a target class score to the input image.

    :param model: torch module whose ReLU activations are replaced in place.
    :param use_cuda: if True, move inputs to the GPU before the forward pass.
    """

    def __init__(self, model, use_cuda=False):
        self.model = model
        # BUG FIX: __call__ reads self.cuda, but it was never assigned,
        # so every call raised AttributeError.
        self.cuda = use_cuda

        def recursive_relu_apply(module_top):
            # Depth-first walk over submodules; swap each ReLU for the
            # custom autograd function.
            for idx, module in module_top._modules.items():
                recursive_relu_apply(module)
                if module.__class__.__name__ == 'ReLU':
                    module_top._modules[idx] = GuidedBackpropReLU.apply

        # replace ReLU with GuidedBackpropReLU
        recursive_relu_apply(self.model)

    def forward(self, input_img):
        return self.model(input_img)

    def __call__(self, input_img, target_category=None):
        """Return d(score[target_category]) / d(input) as an HWC numpy array.

        :param input_img: image batch of size 1 (NCHW assumed — the result
            is taken from batch element 0 and transposed CHW->HWC).
        :param target_category: class index; defaults to the top prediction.
        """
        if self.cuda:
            input_img = input_img.cuda()

        input_img = input_img.requires_grad_(True)

        output = self.forward(input_img)

        # Default to the highest-scoring class.
        if target_category is None:
            target_category = np.argmax(output.cpu().data.numpy())

        # One-hot selector so backward sees a scalar class score.
        one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
        one_hot[0][target_category] = 1
        one_hot = torch.from_numpy(one_hot)
        if self.cuda:
            one_hot = one_hot.cuda()

        one_hot = torch.sum(one_hot * output)
        one_hot.backward()

        output = input_img.grad.cpu().data.numpy()
        output = output[0, :, :, :]
        # CHW -> HWC for display.
        output = output.transpose((1, 2, 0))
        return output

class BaselineModel:
    """Vanilla-gradient saliency: backprops a target class score to the
    input with the model's activations left unchanged.

    :param model: torch module mapping an image batch to class scores.
    :param use_cuda: if True, move inputs to the GPU before the forward pass.
    """

    def __init__(self, model, use_cuda=False):
        self.model = model
        # BUG FIX: __call__ reads self.cuda, but it was never assigned,
        # so every call raised AttributeError.
        self.cuda = use_cuda

    def forward(self, input_img):
        return self.model(input_img)

    def __call__(self, input_img, target_category=None):
        """Return d(score[target_category]) / d(input) as an HWC numpy array.

        :param input_img: image batch of size 1 (NCHW assumed — the result
            is taken from batch element 0 and transposed CHW->HWC).
        :param target_category: class index; defaults to the top prediction.
        """
        if self.cuda:
            input_img = input_img.cuda()

        input_img = input_img.requires_grad_(True)

        output = self.forward(input_img)

        # Default to the highest-scoring class.
        if target_category is None:
            target_category = np.argmax(output.cpu().data.numpy())

        # One-hot selector so backward sees a scalar class score.
        one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
        one_hot[0][target_category] = 1
        one_hot = torch.from_numpy(one_hot)
        if self.cuda:
            one_hot = one_hot.cuda()

        one_hot = torch.sum(one_hot * output)
        one_hot.backward()

        output = input_img.grad.cpu().data.numpy()
        output = output[0, :, :, :]
        # CHW -> HWC for display.
        output = output.transpose((1, 2, 0))
        return output