import torch
import torch.nn as nn

class VAE(nn.Module):
    """Variational Autoencoder for flat vector inputs (e.g. 28x28 MNIST = 784).

    Encodes the input into the parameters (mu, log_var) of a diagonal
    Gaussian posterior, samples a latent z with the reparameterization
    trick, and decodes z back to input space through a Sigmoid output
    layer (so reconstructions lie in [0, 1]).

    Args:
        input_dim: Dimensionality of the flattened input. Default 784.
        hidden_dim: Width of the hidden layers. Default 500.
        latent_dim: Dimensionality of the latent space. Default 2.
    """

    def __init__(self, input_dim=784, hidden_dim=500, latent_dim=2):
        super().__init__()

        self.latent_dim = latent_dim
        self.encoder = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
        )
        self.fc_mu = nn.Linear(hidden_dim, latent_dim)
        self.fc_var = nn.Linear(hidden_dim, latent_dim)

        self.decoder = nn.Sequential(
            nn.Linear(latent_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, input_dim),
            nn.Sigmoid(),
        )

    def encode(self, x):
        """Map input x to the posterior parameters (mu, log_var).

        The network predicts log_var = log(sigma^2) directly rather than
        sigma, which keeps the KL term numerically stable (no log of a
        value that could underflow to zero).

        Returns:
            (mu, log_var), each of shape (batch, latent_dim).
        """
        h = self.encoder(x)
        mu = self.fc_mu(h)
        log_var = self.fc_var(h)
        return mu, log_var

    def reparameterize(self, mu, log_var):
        """Sample z ~ N(mu, sigma^2) via the reparameterization trick.

        z = mu + sigma * eps with eps ~ N(0, I), so gradients flow through
        mu and log_var while the randomness stays in eps.
        """
        sigma = torch.exp(0.5 * log_var)  # exp(0.5 * log(sigma^2)) == sigma
        eps = torch.randn_like(sigma)  # eps ~ N(0, I)
        z = mu + sigma * eps
        return z

    def decode(self, z):
        """Decode a latent vector z back to input space (values in [0, 1])."""
        return self.decoder(z)

    def forward(self, x):
        """Run the full encode -> sample -> decode pipeline.

        Returns:
            (x_hat, mu, log_var): the reconstruction and the posterior
            parameters — exactly the inputs that `loss` expects.
        """
        mu, log_var = self.encode(x)
        z = self.reparameterize(mu, log_var)
        x_hat = self.decode(z)
        return x_hat, mu, log_var

    def loss(self, x_hat, x, mu, log_var, loss_type='MSE', kld_weight=1e-5):
        """Compute the weighted VAE loss: reconstruction + kld_weight * KL.

        Args:
            x_hat: Reconstruction from `forward`, same shape as x.
            x: Target input.
            mu, log_var: Posterior parameters from `encode`.
            loss_type: 'BCE' for a Bernoulli likelihood; any other value
                falls back to MSE (matches the original behavior).
            kld_weight: Weight on the KL term — a hyperparameter to tune.
                Use 1.0 for the unweighted ELBO (e.g. during validation).

        Returns:
            (loss, recons_loss, kld_loss) as scalar tensors.
        """
        criterion = nn.BCELoss() if loss_type == 'BCE' else nn.MSELoss()
        recons_loss = criterion(x_hat, x)
        # KL(q(z|x) || N(0, I)) for a diagonal Gaussian posterior has the
        # closed form:
        #   KL = -1/2 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
        # summed over latent dimensions, then averaged over the batch.
        kld_loss = torch.mean(
            -0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim=1),
            dim=0,
        )
        loss = recons_loss + kld_weight * kld_loss

        return loss, recons_loss, kld_loss

    def sample(self, num_samples, device):
        """Draw num_samples latents from the prior N(0, I) and decode them."""
        # Allocate z directly on the target device instead of creating it
        # on CPU and copying over.
        z = torch.randn(num_samples, self.latent_dim, device=device)
        samples = self.decode(z)
        return samples

    def reconstruct(self, x):
        """Given an input batch, return its reconstruction."""
        return self.forward(x)[0]

    def generate(self, z):
        """Given an already-encoded latent z, return the decoded output."""
        x_hat = self.decode(z)
        return x_hat

    def transform(self, x):
        """Given an input batch x, return a sampled latent encoding z."""
        mu, log_var = self.encode(x)
        z = self.reparameterize(mu, log_var)
        return z