import torch
from torch import nn
from torchvision import models, transforms
from PIL import Image
import matplotlib.pyplot as plt
from LoadModel import load_alexnet
from Utils import get_awa2_images

class Guided_backprop():
    """Guided backpropagation visualizer.

    Registers a forward and a backward hook on every ReLU inside
    ``model.conv`` so that, during the backward pass, only positive
    gradients flowing through positions whose forward activation was
    positive are kept (Springenberg et al., "Striving for Simplicity:
    The All Convolutional Net", 2015). The gradient with respect to the
    input image is captured by a backward hook on the first conv layer.

    NOTE(review): this assumes the model keeps its feature extractor in
    ``model.conv`` (the custom AlexNet from ``load_alexnet``); for a
    torchvision AlexNet use ``model.features`` instead.
    """

    def __init__(self, model):
        self.model = model
        # Gradient w.r.t. the input image, filled in by the backward hook
        # registered on the first conv layer.
        self.image_reconstruction = None
        # Stack of ReLU forward outputs; consumed (popped) in reverse order
        # during backward.
        self.activation_maps = []
        self.model.eval()
        self.register_hooks()

    def register_hooks(self):
        def first_layer_hook_fn(module, grad_in, grad_out):
            # Save the gradient w.r.t. the input image produced by the
            # first conv layer's backward pass; bound to the first Conv2d.
            self.image_reconstruction = grad_in[0]

        def forward_hook_fn(module, input, output):
            # Record each ReLU's forward output; it acts as the "guide"
            # during backpropagation.
            self.activation_maps.append(output)

        def backward_hook_fn(module, grad_in, grad_out):
            # Backward visits the ReLUs in reverse order, so pop from the end.
            grad = self.activation_maps.pop()
            # Binary mask of positions whose forward activation was > 0.
            # Built as a NEW tensor rather than writing `grad[grad > 0] = 1`
            # in place: the stored forward output is still tracked by
            # autograd, and an in-place write bumps its version counter.
            # (ReLU output is never negative, so the mask is equivalent.)
            mask = (grad > 0).type_as(grad)

            # grad_out[0] is the gradient w.r.t. the ReLU output; keep only
            # its positive part ...
            positive_grad_out = torch.clamp(grad_out[0], min=0.0)
            # ... and only where the forward activation was positive.
            new_grad_in = positive_grad_out * mask

            # ReLU has no parameters, so the input-gradient tuple has a
            # single element.
            return (new_grad_in,)

        # Collect submodules. This targets the custom AlexNet whose feature
        # extractor lives in `model.conv`; adapt for other architectures
        # (e.g. `model.features` for torchvision models).
        modules = list(self.model.conv.named_children())

        # Hook every ReLU for guided backprop.
        # NOTE(review): Module.register_backward_hook is deprecated in favor
        # of register_full_backward_hook; kept here for compatibility with
        # the torch version this project targets — confirm before upgrading.
        for name, module in modules:
            if isinstance(module, nn.ReLU):
                module.register_forward_hook(forward_hook_fn)
                module.register_backward_hook(backward_hook_fn)

        # Hook the first conv layer to capture the input-image gradient.
        first_layer = modules[0][1]
        first_layer.register_backward_hook(first_layer_hook_fn)

    def visualize(self, input_image, target_class):
        """Return the guided-backprop gradient map as an HxWxC numpy array.

        Args:
            input_image: tensor of shape (1, C, H, W).
            target_class: class index to backpropagate from, or None to use
                the model's own argmax prediction.
        """
        # Drop any stale activations left by a previous (possibly failed)
        # call so forward appends and backward pops stay balanced.
        self.activation_maps.clear()

        input_image.requires_grad_(True)
        # Forward pass; the registered forward hooks record ReLU outputs.
        model_output = self.model(input_image)
        self.model.zero_grad()
        pred_class = model_output.argmax().item()

        # One-hot target vector seeding the backward pass. zeros_like keeps
        # it on the same device and dtype as the output, so GPU models work.
        grad_target_map = torch.zeros_like(model_output)
        if target_class is not None:
            grad_target_map[0][target_class] = 1
        else:
            grad_target_map[0][pred_class] = 1

        # Backward pass; the registered backward hooks apply the guiding.
        model_output.backward(grad_target_map)
        # Gradient w.r.t. the input image, rearranged to HxWxC image layout.
        result = self.image_reconstruction.data[0].permute(1, 2, 0)
        return result.numpy()

def normalize(I):
    """Normalize a gradient map into [0, 1] for display.

    The map is standardized (mean 0, std 1), rescaled to std 0.1 so values
    cluster near zero, shifted to mean 0.5 so most values are positive, and
    finally clipped into [0, 1].

    Args:
        I: numpy array of raw gradient values (any shape).

    Returns:
        Array of the same shape with values in [0, 1].
    """
    # Guard against zero std (constant input): the original `/ I.std()`
    # produced NaNs there; a constant map now normalizes to mid-gray 0.5.
    std = I.std()
    norm = (I - I.mean()) / (std if std > 0 else 1e-8)
    # Squeeze toward 0 (std 0.1), then shift the mean to 0.5.
    norm = norm * 0.1 + 0.5
    # Clip outliers into the displayable range.
    return norm.clip(0, 1)

if __name__=='__main__':

    # Fetch one AwA2 sample: the preprocessed input tensor and its label.
    origin_images, input_images, labels = get_awa2_images(128)
    image = input_images[0, :].unsqueeze(0)
    label = labels[0].unsqueeze(0)

    # Custom AlexNet (50 classes, 3 input channels) whose feature extractor
    # is exposed as `model.conv`, as Guided_backprop expects.
    model = load_alexnet(50, 3)
    print(model)

    # Run guided backpropagation for the sample's label, rescale the raw
    # gradient map into [0, 1], and save it as an image.
    guided_bp = Guided_backprop(model)
    grad_map = guided_bp.visualize(image, label)
    plt.imsave("gbp.jpg", normalize(grad_map))

    print('END')


# import torch
# from torch.nn import ReLU
# from torchvision import models
# import numpy as np
# from PIL import Image, ImageFilter
# from LoadModel import load_alexnet
# from Utils import get_awa2_images


# class GuidedBackprop():
#     """
#        Produces gradients generated with guided back propagation from the given image
#     """
#     def __init__(self, model):
#         self.model = model
#         self.gradients = None
#         self.forward_relu_outputs = []
#         # Put model in evaluation mode
#         self.model.eval()
#         self.update_relus()
#         self.hook_layers()

#     def hook_layers(self):
#         def hook_function(module, grad_in, grad_out):
#             self.gradients = grad_in[0]
#         # Register hook to the first layer
#         first_layer = list(self.model.conv._modules.items())[0][1]
#         # first_layer = list(self.model.features._modules.items())[0][1]
#         first_layer.register_backward_hook(hook_function)


#     def update_relus(self):
#         """
#             Updates relu activation functions so that
#                 1- stores output in forward pass
#                 2- imputes zero for gradient values that are less than zero
#         """
#         def relu_backward_hook_function(module, grad_in, grad_out):
#             """
#             If there is a negative gradient, change it to zero
#             """
#             # Get last forward output
#             corresponding_forward_output = self.forward_relu_outputs[-1]
#             corresponding_forward_output[corresponding_forward_output > 0] = 1
#             modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)
#             del self.forward_relu_outputs[-1]  # Remove last forward output
#             return (modified_grad_out,)

#         def relu_forward_hook_function(module, ten_in, ten_out):
#             """
#             Store results of forward pass
#             """
#             self.forward_relu_outputs.append(ten_out)

#         # Loop through layers, hook up ReLUs
#         for pos, module in self.model.conv._modules.items():
#         # for pos, module in self.model.features._modules.items():
#             if isinstance(module, ReLU):
#                 # print(pos)
#                 module.register_backward_hook(relu_backward_hook_function)
#                 module.register_forward_hook(relu_forward_hook_function)

#     def generate_gradients(self, input_image, target_class):
#         # Forward pass
#         input_image.requires_grad_(True)
#         model_output = self.model(input_image)
#         # Zero gradients
#         self.model.zero_grad()
#         # Target for backprop
#         one_hot_output = torch.FloatTensor(1, model_output.size()[-1]).zero_()
#         one_hot_output[0][target_class] = 1
#         # Backward pass
#         model_output.backward(gradient=one_hot_output)
#         # Convert Pytorch variable to numpy array
#         # [0] to get rid of the first channel (1,3,224,224)
#         gradients_as_arr = self.gradients.data.numpy()[0]
#         return gradients_as_arr


# def format_np_output(np_arr):
#     """
#         This is a (kind of) bandaid fix to streamline saving procedure.
#         It converts all the outputs to the same format which is 3xWxH
#         with using sucecssive if clauses.
#     Args:
#         im_as_arr (Numpy array): Matrix of shape 1xWxH or WxH or 3xWxH
#     """
#     # Phase/Case 1: The np arr only has 2 dimensions
#     # Result: Add a dimension at the beginning
#     if len(np_arr.shape) == 2:
#         np_arr = np.expand_dims(np_arr, axis=0)
#     # Phase/Case 2: Np arr has only 1 channel (assuming first dim is channel)
#     # Result: Repeat first channel and convert 1xWxH to 3xWxH
#     if np_arr.shape[0] == 1:
#         np_arr = np.repeat(np_arr, 3, axis=0)
#     # Phase/Case 3: Np arr is of shape 3xWxH
#     # Result: Convert it to WxHx3 in order to make it saveable by PIL
#     if np_arr.shape[0] == 3:
#         np_arr = np_arr.transpose(1, 2, 0)
#     # Phase/Case 4: NP arr is normalized between 0-1
#     # Result: Multiply with 255 and change type to make it saveable by PIL
#     if np.max(np_arr) <= 1:
#         np_arr = (np_arr*255).astype(np.uint8)
#     return np_arr


# def save_gradient_images(gradient, path):
#     gradient = gradient - gradient.min()
#     gradient /= gradient.max()
#     im = gradient
#     if isinstance(im, (np.ndarray, np.generic)):
#         im = format_np_output(im)
#         im = Image.fromarray(im)
#     im.save(path)


# def convert_to_grayscale(im_as_arr):
#     """
#         Converts 3d image to grayscale
#     Args:
#         im_as_arr (numpy arr): RGB image with shape (D,W,H)
#     returns:
#         grayscale_im (numpy_arr): Grayscale image with shape (1,W,D)
#     """
#     grayscale_im = np.sum(np.abs(im_as_arr), axis=0)
#     im_max = np.percentile(grayscale_im, 99)
#     im_min = np.min(grayscale_im)
#     grayscale_im = (np.clip((grayscale_im - im_min) / (im_max - im_min), 0, 1))
#     grayscale_im = np.expand_dims(grayscale_im, axis=0)
#     return grayscale_im


# def get_positive_negative_saliency(gradient):
#     """
#         Generates positive and negative saliency maps based on the gradient
#     Args:
#         gradient (numpy arr): Gradient of the operation to visualize
#     returns:
#         pos_saliency ( )
#     """
#     pos_saliency = (np.maximum(0, gradient) / gradient.max())
#     neg_saliency = (np.maximum(0, -gradient) / -gradient.min())
#     return pos_saliency, neg_saliency


# if __name__ == '__main__':
#     target_example = 0  # Snake
#     origin_images, input_images, labels = get_awa2_images(128)
#     image = input_images[0, :].unsqueeze(0)
#     label = labels[0].unsqueeze(0)
#     # print(labels, label)
#     # exit(0)
#     model = load_alexnet(50, 3)
#     # model = models.alexnet(pretrained=True)
#     # (original_image, prep_img, target_class, file_name_to_export, pretrained_model) =\
#     #     get_example_params(target_example)

#     # Guided backprop
#     GBP = GuidedBackprop(model)
#     # Get gradients
#     guided_grads = GBP.generate_gradients(image, label)


#     # Save colored gradients
#     save_gradient_images(guided_grads, "gbp_gradient.jpg")
#     # exit(0)
#     # Convert to grayscale
#     grayscale_guided_grads = convert_to_grayscale(guided_grads)
#     # Save grayscale gradients
#     save_gradient_images(grayscale_guided_grads, "gbp_gradient_grey.jpg")
#     # # Positive and negative saliency maps
#     # pos_sal, neg_sal = get_positive_negative_saliency(guided_grads)
#     # save_gradient_images(pos_sal, "gbp_gradient_pos.jpg")
#     # save_gradient_images(neg_sal, "gbp_gradient_neg.jpg")
#     # exit(0)
#     # print('Guided backprop completed')

