from collections import OrderedDict
from torch.autograd import grad
import torch
import torch.nn.functional as F

def input_jacobian(net, x, z_dim=512):
    """Compute the Jacobian of the encoder output w.r.t. the input.

    Similar to ``torch.autograd.functional.jacobian`` in later PyTorch
    versions.

    Args:
        net: model whose forward returns a 3-tuple; the last element is the
            (B, z_dim) output that is differentiated.
        x: input batch (B, C, H, W); ``requires_grad`` is enabled in place.
        z_dim: number of output coordinates to differentiate.

    Returns:
        Tensor of shape (B, z_dim, 1, H, W); each gradient is summed over the
        channel dimension (keepdim) to save GPU memory.
    """
    x.requires_grad_(True)
    _, _, out = net(x)
    J_list = []
    for k in range(z_dim):
        # Vector-Jacobian product with an all-ones vector gives the per-sample
        # gradient of out[:, k]; retain_graph so the next k reuses the graph.
        J = grad(out[:, k], x, torch.ones_like(out[:, k]), retain_graph=True)[0]
        # Sum the channel dimension to save GPU memory.
        J = torch.sum(J, dim=1, keepdim=True)
        J_list.append(J)
    jac = torch.stack(J_list, dim=1)
    return jac

def generator_jacobian(net, z):
    """Compute the Jacobian of the decoder output w.r.t. the latent code.

    Similar to ``torch.autograd.functional.jacobian`` in later PyTorch
    versions.

    Args:
        net: model exposing ``decode(latent=...)``.
        z: (B, N) latent variables; ``requires_grad`` is enabled in place.

    Returns:
        Tensor of shape (B, N, *decoder_output_shape[1:]): for each sample,
        one decoder-output-shaped gradient per latent dimension.
    """
    z.requires_grad_(True)
    rec = net.decode(latent=z)
    rec_orgshape = rec.shape
    # Flatten everything but the batch so we can differentiate one output
    # coordinate at a time.
    rec = rec.reshape(len(rec), -1)
    J_list = []
    for k in range(rec.size(1)):
        J = grad(rec[:, k], z, torch.ones_like(rec[:, k]), retain_graph=True)[0]
        J_list.append(J)
    jac = torch.stack(J_list, dim=1)  # (B, n_out, N)
    jac = jac.transpose(1, 2)         # (B, N, n_out)
    return jac.reshape(jac.size(0), -1, *rec_orgshape[1:])

def generator_jacobian_fds(net, z, delta=0.3):
    """Approximate the decoder Jacobian by central finite differences.

    Args:
        net: model exposing ``decode(latent=...)``.
        z: (B, N) latent variables.
        delta: step size for the central difference.

    Returns:
        Tensor of shape (B, N, *decoder_output_shape[1:]).
    """
    jac_list = []
    for dim in range(z.size(1)):
        z_plus = z.clone()
        z_plus[:, dim] += delta
        z_minus = z.clone()
        z_minus[:, dim] -= delta
        # Decode both perturbations in a single forward pass.
        z_in = torch.cat([z_plus, z_minus], dim=0)
        rec = net.decode(latent=z_in)
        rplus, rminus = torch.chunk(rec, 2, dim=0)
        jac_list.append((rplus - rminus) / (2.0 * delta))
    return torch.stack(jac_list, dim=1)


def decoder_change(net, x, delta=0.3):
    """Finite-difference decoder Jacobian evaluated at the latent code of x."""
    z = net.encode_deterministic(images=x)
    # Differentiate the generator around the encoded point.
    jac = generator_jacobian_fds(net, z, delta)
    return jac
    

def integrated_gradients(net, x, baseline, device, z_dim=512, n_steps = 20):
    """Integrated-gradients-style attributions for the encoder.

    Interpolates linearly from ``baseline`` (step 0) to ``x`` (step 1),
    computes the input Jacobian at every step, and sums over the steps.

    NOTE(review): classic IG additionally multiplies the summed gradients by
    (x - baseline) / n_steps; that scaling is not applied here — confirm that
    callers account for it.

    Args:
        net: model passed through to ``input_jacobian``.
        x: input batch (B, C, H, W).
        baseline: baseline image, (C, H, W) or (1, C, H, W).
        device: device for the interpolation steps and baseline.
        z_dim: number of encoder outputs to differentiate.
        n_steps: number of interpolation points (both endpoints included).

    Returns:
        Tensor of shape (B, z_dim, 1, H, W): gradients summed over steps.
    """
    steps = torch.linspace(0, 1, n_steps).to(device)
    baseline = baseline.to(device)
    if len(baseline.shape) == 3:
        baseline = baseline.unsqueeze(0)

    # (n_steps, B, C, H, W) linear path from baseline to x.
    interps = (x.reshape(-1, x.size(0), x.size(1), x.size(2), x.size(3))
               * steps.reshape(-1, 1, 1, 1, 1)) \
              + (1 - steps.reshape(-1, 1, 1, 1, 1)) * baseline
    interps = interps.contiguous()
    old_shape = interps.shape
    # Flatten (step, batch) into one big batch for the model.
    interps = interps.reshape(-1, interps.size(2), interps.size(3), interps.size(4))

    jacs = input_jacobian(net, interps, z_dim)
    # Un-flatten and sum over the interpolation steps; use the Jacobian's own
    # trailing shape because input_jacobian collapses the channel dimension.
    ig = jacs.reshape(old_shape[0], old_shape[1], -1,
                      jacs.shape[-3], jacs.shape[-2], jacs.shape[-1]).sum(dim=0)

    return ig

def smoothgrad_gradients(net, x, device, n_samples = 20, noise_lvl = 0.02, z_dim=512):
    """SmoothGrad attributions: sum input Jacobians over noisy copies of x.

    Args:
        net: model passed through to ``input_jacobian``.
        x: input batch (B, C, H, W).
        device: device for the noise tensor.
        n_samples: number of noisy copies per input.
        noise_lvl: standard deviation of the additive Gaussian noise.
        z_dim: number of encoder outputs to differentiate (defaults to the
            value input_jacobian previously used implicitly).

    Returns:
        Tensor of shape (B, z_dim, 1, H, W): gradients summed over samples.
    """
    noise = noise_lvl * torch.randn(n_samples, *x.shape, device=device)
    interps = x.reshape(-1, x.size(0), x.size(1), x.size(2), x.size(3)) + noise
    interps = interps.contiguous()
    old_shape = interps.shape
    # Flatten (sample, batch) into one big batch for the model.
    interps = interps.reshape(-1, interps.size(2), interps.size(3), interps.size(4))

    jacs = input_jacobian(net, interps, z_dim)
    # Use the Jacobian's own trailing shape: input_jacobian collapses the
    # channel dimension to 1, so reusing old_shape[2] (the input channel
    # count) mis-shapes the result for multi-channel inputs. This mirrors
    # the fix already present in integrated_gradients.
    sg = jacs.reshape(old_shape[0], old_shape[1], -1,
                      jacs.shape[-3], jacs.shape[-2], jacs.shape[-1]).sum(dim=0)
    return sg


# -- Code modified from source: https://github.com/kazuto1011/grad-cam-pytorch
class _BaseWrapper(object):
    """
    Please modify forward() and backward() depending on your task.
    """
    def __init__(self, model):
        super(_BaseWrapper, self).__init__()
        self.device = model.device
        self.model = model
        self.handlers = []  # a set of hook function handlers

    def generate(self):
        raise NotImplementedError

    def forward(self, image):
        """
        Simple classification
        """
        self.model.zero_grad()
        # out = self.model.encode_deterministic(images=image)
        # return out
        self.logits = self.model(image)
        self.probs = F.softmax(self.logits, dim=1)
        return list(zip(*self.probs.sort(0, True)))  # element: (probability, index)


class GradCam(_BaseWrapper):
    """Grad-CAM over the wrapped model's ``model`` sub-module.

    Registers forward/backward hooks on every module whose qualified name is
    in ``candidate_layers`` (or on all modules when the list is empty/None),
    then builds class-activation maps from the pooled gradients.
    """

    def __init__(self, model, candidate_layers=None):
        super(GradCam, self).__init__(model)
        self.fmap_pool = OrderedDict()  # id(module) -> forward activations
        self.grad_pool = OrderedDict()  # id(module) -> gradients w.r.t. output
        # Avoid the mutable-default-argument pitfall; None means "all layers".
        self.candidate_layers = candidate_layers if candidate_layers is not None else []

        def forward_hook(module, input, output):
            self.fmap_pool[id(module)] = output.detach()

        def backward_hook(module, grad_in, grad_out):
            self.grad_pool[id(module)] = grad_out[0].detach()

        for name, module in self.model.model.named_modules():
            if len(self.candidate_layers) == 0 or name in self.candidate_layers:
                self.handlers.append(module.register_forward_hook(forward_hook))
                # NOTE(review): register_backward_hook is deprecated in newer
                # PyTorch in favour of register_full_backward_hook; kept for
                # compatibility with the version this file targets.
                self.handlers.append(module.register_backward_hook(backward_hook))

    def find(self, pool, target_layer):
        """Return the value recorded in ``pool`` for the module named
        ``target_layer``; raise ValueError if nothing was recorded for it."""
        for name, module in self.model.model.named_modules():
            if name == target_layer and id(module) in pool:
                return pool[id(module)]
        raise ValueError(f"Invalid Layer Name: {target_layer}")

    def normalize(self, grads):
        # L2-normalize with an epsilon to avoid division by zero.
        l2_norm = torch.sqrt(torch.mean(torch.pow(grads, 2))) + 1e-5
        return grads / l2_norm

    def compute_grad_weights(self, grads):
        """Global-average-pool the normalized gradients into channel weights."""
        grads = self.normalize(grads)
        return F.adaptive_avg_pool2d(grads, 1)

    def generate(self, target_layer):
        """Build the min-max-normalized Grad-CAM heat map for target_layer."""
        fmaps = self.find(self.fmap_pool, target_layer)
        grads = self.find(self.grad_pool, target_layer)
        weights = self.compute_grad_weights(grads)

        gcam = (fmaps * weights).sum(dim=1, keepdim=True)
        gcam = torch.clamp(gcam, min=0.0)  # ReLU: keep positive evidence only

        # Rescale to [0, 1] for visualization.
        gcam -= gcam.min()
        gcam /= (gcam.max() + 1e-5)
        return gcam

    def remove_hooks(self):
        """Detach every registered hook handle."""
        for fh in self.handlers:
            fh.remove()


def grad_cam(net, x):
    """Per-latent-dimension Grad-CAM heat maps for the encoder.

    Args:
        net: model exposing ``model`` (hooked sub-module), ``z_dim`` and
            ``encode_deterministic(images=...)``.
        x: input batch (B, C, H, W).

    Returns:
        Tensor of shape (B, z_dim, 1, H, W): one heat map per latent
        dimension, upsampled to the input resolution.
    """
    net.model.requires_grad_(True)
    target_layers = ['encoder.main.10']
    gcam = GradCam(model=net, candidate_layers=target_layers)
    out = net.encode_deterministic(images=x)
    J_list = []
    for k in range(net.z_dim):
        # Backprop a one-hot vector to isolate the k-th latent dimension.
        one_hot = torch.zeros((out.shape[0], out.shape[1])).float()
        one_hot[:, k] = 1
        # Use the output's device instead of hard-coding .cuda() so this
        # also works for CPU models; identical behavior on CUDA.
        out.backward(gradient=one_hot.to(out.device), retain_graph=True)
        gcam_out = gcam.generate(target_layers[0])
        # F.upsample is a deprecated alias of F.interpolate.
        gcam_out = F.interpolate(gcam_out, (x.shape[-2], x.shape[-1]), mode='bilinear')
        J_list.append(gcam_out)
    jac = torch.stack(J_list, dim=1)
    gcam.remove_hooks()
    return jac


def calc_mean_feature_value(data_loader, iters=50):
    """Calculate an average feature to use as a baseline for IG."""
    collected = []
    for batch_idx, batch in enumerate(data_loader):
        # Stop once the requested number of batches has been seen.
        if batch_idx == iters:
            break
        images, _, _ = batch
        collected.append(images)
    x_batch_list = torch.stack(collected, dim=0)
    print(f"Calculating feature mean using {len(x_batch_list)} samples.")
    return x_batch_list.mean(dim=0)