import torch.nn as nn
import torch


class LossComputeWithModule(object):
    """Compute generator and discriminator losses for several GAN objectives.

    The discriminator outputs are expected to be probabilities in [0, 1]
    (this class uses ``nn.BCELoss``, not ``BCEWithLogitsLoss``), except for
    the 'hinge', 'tv', and 'rsgan' variants, which operate directly on the
    raw scores. -- NOTE(review): confirm callers apply sigmoid before the
    BCE-based variants.
    """

    def __init__(self, loss_type='normal'):
        # BCE over probabilities; see class docstring for the implied
        # sigmoid requirement on discriminator outputs.
        self.bce_loss = nn.BCELoss()
        self.loss_type = loss_type

    def __call__(self, d_out_real, d_out_fake):
        """Compute the (generator, discriminator) loss pair.

        :param d_out_real: discriminator output on real samples
        :param d_out_fake: discriminator output on generated samples
        :return: tuple ``(g_loss, d_loss)``
        :raises NotImplementedError: if ``self.loss_type`` is unknown
        """
        if self.loss_type == 'standard':  # the non-saturating GAN loss
            d_loss_real = self.bce_loss(d_out_real, torch.ones_like(d_out_real))
            d_loss_fake = self.bce_loss(d_out_fake, torch.zeros_like(d_out_fake))
            d_loss = d_loss_real + d_loss_fake

            # Generator maximizes log D(fake) instead of minimizing
            # log(1 - D(fake)) -> better gradients early in training.
            g_loss = self.bce_loss(d_out_fake, torch.ones_like(d_out_fake))

        elif self.loss_type == 'JS':  # the vanilla (minimax) GAN loss
            d_loss_real = self.bce_loss(d_out_real, torch.ones_like(d_out_real))
            d_loss_fake = self.bce_loss(d_out_fake, torch.zeros_like(d_out_fake))
            d_loss = d_loss_real + d_loss_fake

            # Generator directly minimizes log(1 - D(fake)).
            g_loss = -d_loss_fake

        elif self.loss_type == 'KL':  # GAN loss implicitly minimizing KL-divergence
            d_loss_real = self.bce_loss(d_out_real, torch.ones_like(d_out_real))
            d_loss_fake = self.bce_loss(d_out_fake, torch.zeros_like(d_out_fake))
            d_loss = d_loss_real + d_loss_fake

            g_loss = torch.mean(-d_out_fake)

        elif self.loss_type == 'hinge':  # the hinge loss
            # BUG FIX: nn.ReLU(x) constructs a ReLU *module* (x lands in the
            # `inplace` constructor arg) rather than applying ReLU to x, so
            # torch.mean(...) raised at runtime. Use the functional form.
            d_loss_real = torch.mean(torch.relu(1.0 - d_out_real))
            d_loss_fake = torch.mean(torch.relu(1.0 + d_out_fake))
            d_loss = d_loss_real + d_loss_fake

            g_loss = -torch.mean(d_out_fake)

        elif self.loss_type == 'tv':  # the total variation distance
            # BUG FIX: nn.Tanh(x) constructs a Tanh module instead of
            # applying tanh; use torch.tanh on the tensors.
            d_loss = torch.mean(torch.tanh(d_out_fake) - torch.tanh(d_out_real))
            g_loss = torch.mean(-torch.tanh(d_out_fake))

        elif self.loss_type == 'rsgan':  # relativistic standard GAN
            d_loss = self.bce_loss(d_out_real - d_out_fake, torch.ones_like(d_out_real))
            g_loss = self.bce_loss(d_out_fake - d_out_real, torch.ones_like(d_out_fake))

        elif self.loss_type == 'normal':
            d_loss = self.bce_loss(d_out_real, d_out_fake)
            # Generator loss intentionally unused in this mode; kept as the
            # int 0 (not a tensor) to preserve existing caller behavior.
            g_loss = 0

        else:
            raise NotImplementedError("Divergence '%s' is not implemented" % self.loss_type)

        return g_loss, d_loss
