# implementing basic grad_cam
# author-by: xjtu-blacksmith
# create-on: 2020.2.25

# reference: <https://github.com/kazuto1011/grad-cam-pytorch>

import os
from os import path

import cv2
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from torch.nn import functional as F
import numpy as np

class _BasicWrapper():
    '''
        Basic class to hold hook, forward / backward methods
    '''
    def __init__(self, model):
        super(_BasicWrapper, self).__init__()
        self.model = model
        self.handlers = []  # for hook
    
    def _encode_one_hot(self, ids):
        ''' get one-hot vector for selected id
        '''
        one_hot = torch.zeros_like(self.logits)
        one_hot.scatter_(1, ids, 1.0)
        return one_hot
    
    def forward(self, x):
        self.image_shape = x.shape[2:]  # remove dimension of minibatch
        self.logits = self.model(x)  # get logits output (no softmax layer at end)
        self.probs = F.softmax(self.logits, dim=1)  # softmax from logits
        return self.probs.sort(dim=1, descending=True)  # from high to low
    
    def backward(self, ids):
        one_hot = self._encode_one_hot(ids)  # get one-hot codes from classes
        self.model.zero_grad()
        self.logits.backward(gradient=one_hot, retain_graph=True)  # backward by grad=one-hot code
    
    def generate(self):
        ''' to generate visiualization results from hooks
        '''
        raise NotImplementedError

    def remove_hook(self):
        for handle in self.handlers:
            handle.remove()

class BackPropagation(_BasicWrapper):
    ''' Vanilla back-propagation: exposes the gradient w.r.t. the input image.
    '''
    def forward(self, x):
        # the input itself must track gradients so generate() can read them
        self.image = x.requires_grad_()
        return super(BackPropagation, self).forward(self.image)

    def generate(self):
        ''' Return a copy of the image gradient and reset the stored one. '''
        grad_copy = self.image.grad.clone()
        self.image.grad.zero_()
        return grad_copy
    
class GradCAM(_BasicWrapper):
    ''' Grad-CAM: weight a convolutional layer's feature maps by the
        spatially-averaged gradients flowing into it, ReLU the weighted sum,
        and upsample the result to the input resolution.
    '''

    def __init__(self, model, candidate_layers=None):
        super(GradCAM, self).__init__(model)
        self.fmap_pool = {}  # layer name -> forward activation
        self.grad_pool = {}  # layer name -> gradient w.r.t. that activation
        self.candidate_layers = candidate_layers  # layers to record; None = all

        def save_fmaps(key):
            ''' hook factory: store the forward activation under `key`
            '''
            def forward_hook(module, input, output):
                self.fmap_pool[key] = output.detach()
            return forward_hook

        def save_grads(key):
            ''' hook factory: store the output gradient under `key`
            '''
            def backward_hook(module, grad_in, grad_out):
                self.grad_pool[key] = grad_out[0].detach()
            return backward_hook

        # register hooks on all candidate layers
        # (if candidate layers are not set, register hooks on every layer)
        # NOTE(review): register_backward_hook is deprecated in recent PyTorch
        # in favor of register_full_backward_hook; confirm the target torch
        # version before switching, as the two differ for multi-op modules.
        for name, module in self.model.named_modules():
            if self.candidate_layers is None or name in self.candidate_layers:
                self.handlers.append(module.register_forward_hook(save_fmaps(name)))
                self.handlers.append(module.register_backward_hook(save_grads(name)))

    def _find(self, pool, target_layer):
        ''' Look up `target_layer`'s recorded feature map or gradient;
            raise ValueError for an unknown layer name.
        '''
        if target_layer in pool:
            return pool[target_layer]
        else:
            raise ValueError("Invalid layer name: {}".format(target_layer))

    def generate(self, target_layer):
        ''' Build the Grad-CAM heat map (B, 1, H, W) for `target_layer`,
            min-max normalized per sample into [0, 1].
        '''
        fmaps = self._find(self.fmap_pool, target_layer)
        grads = self._find(self.grad_pool, target_layer)
        # global-average-pool the gradients: one importance weight per channel
        weights = F.adaptive_avg_pool2d(grads, 1)

        # weighted channel sum; ReLU keeps only positive class influence
        gcam = torch.mul(fmaps, weights).sum(dim=1, keepdim=True)
        gcam = F.relu(gcam)
        gcam = F.interpolate(
            gcam, self.image_shape, mode='bilinear', align_corners=False
        )  # upsample to the input image resolution

        # per-sample min-max normalization into [0, 1]
        B, C, H, W = gcam.shape
        gcam = gcam.view(B, -1)
        gcam -= gcam.min(dim=1, keepdim=True)[0]
        # clamp the denominator: an all-zero map (ReLU removed everything)
        # would otherwise divide by zero and fill the output with NaNs
        gcam /= gcam.max(dim=1, keepdim=True)[0].clamp(min=1e-12)
        gcam = gcam.view(B, C, H, W)

        return gcam

def load_images(image_paths):
    ''' Load every image listed in `image_paths`, returning both the
        preprocessed tensors and the raw (resized) copies.
    '''
    images, raw_images = [], []
    print("Images:")  # announce which files are about to be read
    for idx, img_path in enumerate(image_paths):
        print("\t#{}: {}".format(idx, img_path))
        tensor, raw = preprocess(img_path)  # run the standard transforms
        images.append(tensor)
        raw_images.append(raw)
    return images, raw_images

def preprocess(image_path):
    ''' Read an image from disk, resize it to 224x224, and normalize it with
        the same transforms used at training time; also return the raw resize.
    '''
    raw_image = cv2.imread(image_path)
    raw_image = cv2.resize(raw_image, (224, 224))
    pipeline = transforms.Compose(
        [
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5] * 3, std=[0.25] * 3),
        ]
    )
    # cv2 yields BGR; flip the channel axis to RGB before transforming
    image = pipeline(raw_image[..., ::-1].copy())
    return image, raw_image

def save_gradcam(filename, gcam, raw_image, paper_cmap=False):
    ''' Blend a Grad-CAM heat map with the raw image and write it to disk.

        filename   -- output path for the composed image
        gcam       -- (H, W) heat-map tensor with values in [0, 1]
        raw_image  -- raw image as produced by preprocess() (cv2 BGR array)
        paper_cmap -- if True, alpha-blend by map intensity (print-friendly)
    '''
    gcam = gcam.cpu().numpy()
    cmap = cm.jet_r(gcam)[..., :3] * 255.0  # map intensities through jet_r
    if paper_cmap:  # to be printed
        alpha = gcam[..., None]
        gcam = alpha * cmap + (1 - alpha) * raw_image
    else:
        # np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin float is the documented replacement (float64 dtype)
        gcam = (cmap.astype(float) + raw_image.astype(float)) / 2
    cv2.imwrite(filename, np.uint8(gcam))  # save to image

if __name__ == "__main__":

    # project-local imports (kept inside the script guard);
    # the unused `from PIL import Image` has been removed
    from model import vgg16_101
    from utils.translate import get_class

    # restore the trained network and switch to inference mode
    net = vgg16_101()
    net.load_state_dict(torch.load("output/model-vgg16.pth"))
    net.eval()

    # collect the six test images and stack them into one minibatch
    image_paths = []
    for i in range(1, 7):
        image_path = path.join('data', 'test', '{}.jpg'.format(i))
        image_paths.append(image_path)
    images, raw_images = load_images(image_paths)
    images = torch.stack(images)

    # plain back-propagation pass: obtain the sorted class predictions
    bp = BackPropagation(net)
    preds, ids = bp.forward(images)

    # Grad-CAM pass; hooks are registered at construction time
    gcam = GradCAM(net)
    _ = gcam.forward(images)
    target_layer = 'features'  # convolutional stage of the VGG backbone
    topk = 3  # visualize the three most probable classes per image

    # make sure the output directory exists
    if not path.isdir('output/grad_cam'):
        os.mkdir('output/grad_cam')

    # one backward + generate per rank, one output file per image
    for i in range(topk):

        gcam.backward(ids=ids[:, [i]])
        regions = gcam.generate(target_layer=target_layer)

        # save each sample's heat map blended with its raw image
        for j in range(len(images)):

            save_gradcam(
                filename=path.join(
                    'output',
                    'grad_cam',
                    "grad_cam-{}-{}.png".format(j, get_class(ids[j, i]))
                ),
                gcam=regions[j, 0],
                raw_image=raw_images[j]
            )
