import torch
from torch import nn


def KL_between_normals(q_distr, p_distr):
    """Row-wise KL divergence KL(q || p) between diagonal Gaussians.

    Both arguments are (mean, std) tuples of tensors shaped (batch, k).
    Returns a tensor of shape (batch,) holding one KL value per row.
    """
    q_mean, q_std = q_distr
    p_mean, p_std = p_distr
    dim = q_mean.size(1)

    # Log-determinants of the diagonal covariances; std is clamped away
    # from zero so the log stays finite.
    log_det_q = (2.0 * torch.log(torch.clamp(q_std, min=1e-8))).sum(dim=1)
    log_det_p = (2.0 * torch.log(torch.clamp(p_std, min=1e-8))).sum(dim=1)

    # Trace and Mahalanobis terms, both taken under p's covariance.
    p_var = p_std ** 2
    trace_term = (q_std ** 2 / p_var).sum(dim=1)
    maha_term = ((p_mean - q_mean) ** 2 / p_var).sum(dim=1)

    # Standard closed form: 0.5 * (tr + maha - k + log|Sigma_p| - log|Sigma_q|)
    return 0.5 * (trace_term + maha_term - dim + log_det_p - log_det_q)


class VIB(nn.Module):
    """Variational Information Bottleneck classifier (Alemi et al., 2017).

    Wraps an encoder module whose output has width 2*dimZ (first half is the
    latent mean, second half the raw std) and a decoder module mapping latent
    samples to class logits.  The training objective is the VIB bound
    E[-log q(y|z)] + beta * KL(q(z|x) || N(0, I)).
    """

    def __init__(self, encoder_output, decoder_logits, dimZ=256, beta=1e-3, num_samples=10):
        super().__init__()

        self.beta = beta                  # weight of the KL (compression) term
        self.dimZ = dimZ                  # dimension of the latent code Z
        self.num_samples = num_samples    # default Monte Carlo sample count
        self.encoder_output = encoder_output
        self.decoder_logits = decoder_logits

    def gaussian_noise(self, num_samples, K):
        """Standard-normal noise of shape (*num_samples, K).

        ``num_samples`` may be an int or a tuple.  (The original comment
        promised both, but an int crashed on the ``*num_samples`` unpack.)
        NOTE(review): placement on CUDA is hard-coded, as in the original;
        confirm all callers run on GPU.
        """
        if isinstance(num_samples, int):
            num_samples = (num_samples,)
        return torch.randn(*num_samples, K).cuda()

    def sample_prior_Z(self, num_samples):
        """Draw ``num_samples`` latent codes from the N(0, I) prior."""
        return self.gaussian_noise(num_samples=num_samples, K=self.dimZ)

    def encoder_result(self, batch):
        """Run the encoder on ``batch`` and split its output into (mu, sigma).

        The first dimZ columns are the mean; the remaining columns pass
        through softplus so the std is strictly positive.

        BUG FIX: the original never applied the encoder to ``batch`` (it
        indexed ``self.encoder_output`` directly) and built an ``nn.Softplus``
        *module* — with the tensor as its ``beta`` constructor argument —
        instead of applying the softplus function.
        """
        encoded = self.encoder_output(batch)
        mu = encoded[:, :self.dimZ]
        sigma = nn.functional.softplus(encoded[:, self.dimZ:])

        return mu, sigma

    def sample_encoder_Z(self, num_samples, batch):
        """Reparameterized draws z = mu + sigma * eps with eps ~ N(0, I).

        Returns a tensor of shape (num_samples, batch_size, dimZ); mu/sigma
        broadcast over the leading sample dimension.
        """
        batch_size = batch.size(0)
        mu, sigma = self.encoder_result(batch)

        return mu + sigma * self.gaussian_noise(num_samples=(num_samples, batch_size), K=self.dimZ)

    def forward(self, batch_x):
        """Class logits for ``batch_x``, averaged over num_samples latent draws."""
        to_decoder = self.sample_encoder_Z(num_samples=self.num_samples, batch=batch_x)

        # Mean over the leading sample dimension -> (batch_size, n_classes).
        return torch.mean(self.decoder_logits(to_decoder), dim=0)

    def batch_loss(self, num_samples, batch_x, batch_y):
        """VIB training loss for one batch.

        Returns ``(total_loss, I_ZY_lower_bound, I_ZX_upper_bound)`` where
        ``total_loss = E[-log q(y|z)] + beta * KL(q(z|x) || p(z))``.
        """
        batch_size = batch_x.size(0)

        # Prior p(z) = N(0, I), matched to the batch shape.
        # NOTE(review): device hard-coded to CUDA, consistent with gaussian_noise.
        prior_Z_distr = (torch.zeros(batch_size, self.dimZ).cuda(),
                         torch.ones(batch_size, self.dimZ).cuda())
        encoder_Z_distr = self.encoder_result(batch_x)

        # I(Z; X) upper bound: KL(q(z|x) || p(z)), averaged over the batch.
        I_ZX_bound = torch.mean(KL_between_normals(encoder_Z_distr, prior_Z_distr))

        # BUG FIX: the original sampled with self.num_samples but expanded the
        # targets with the ``num_samples`` argument, so the shapes disagreed
        # whenever the two differed; use the argument consistently.
        to_decoder = self.sample_encoder_Z(num_samples=num_samples, batch=batch_x)

        decoder_logits = self.decoder_logits(to_decoder)
        # CrossEntropyLoss expects (batch, n_classes, ...): put the batch
        # first and the sample dimension last.
        decoder_logits = decoder_logits.permute(1, 2, 0)

        # ``reduce=False`` is deprecated; reduction="none" keeps the
        # per-element losses for the Monte Carlo average below.
        loss = nn.CrossEntropyLoss(reduction="none")
        cross_entropy_loss = loss(decoder_logits, batch_y[:, None].expand(-1, num_samples))

        # Monte Carlo estimate of E_{eps ~ N(0, I)} [-log q(y | z)].
        cross_entropy_loss_montecarlo = torch.mean(cross_entropy_loss, dim=-1)
        minusI_ZY_bound = torch.mean(cross_entropy_loss_montecarlo, dim=0)

        return (torch.mean(minusI_ZY_bound + self.beta * I_ZX_bound),
                -minusI_ZY_bound, I_ZX_bound)