import torch
from torch import nn


class Encoder(nn.Module):
    """Convolutional encoder: three downsampling conv layers, then two conv
    heads that each collapse the feature map to a 10-dim latent vector
    (mean and std of the approximate posterior)."""

    def __init__(self):
        super().__init__()
        # Shape comments assume 28x28 single-channel input (e.g. MNIST).
        self.layer1 = nn.Conv2d(1, 16, kernel_size=3, stride=1, padding=0,
                                bias=False)  # -> [b, 16, 26, 26]
        self.layer2 = nn.Conv2d(16, 32, kernel_size=2, stride=2, padding=0,
                                bias=False)  # -> [b, 32, 13, 13]
        self.layer3 = nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=0,
                                bias=False)  # -> [b, 64, 6, 6]
        # 6x6 kernels reduce the spatial dims to 1x1, i.e. one scalar per channel.
        self.mean = nn.Conv2d(64, 10, kernel_size=6, stride=1, padding=0,
                              bias=False)  # -> [b, 10, 1, 1]
        self.std = nn.Conv2d(64, 10, kernel_size=6, stride=1, padding=0,
                             bias=False)  # -> [b, 10, 1, 1]
        self.act = nn.ReLU()

    def forward(self, x):
        """Encode a [b, 1, 28, 28] batch into a ([b, 10], [b, 10]) pair.

        Returns (mean, std); the std head's raw output is exponentiated so
        the returned std is strictly positive.
        """
        for conv in (self.layer1, self.layer2, self.layer3):
            x = self.act(conv(x))                      # [b, 64, 6, 6] after the loop
        mu = self.mean(x)                              # [b, 10, 1, 1]
        raw_std = self.std(x)                          # [b, 10, 1, 1]
        # Drop the trailing 1x1 spatial dims to get flat [b, 10] tensors.
        return mu.reshape(mu.shape[:2]), \
            torch.exp(raw_std).reshape(raw_std.shape[:2])


class Decoder(nn.Module):
    """Fully-connected decoder: maps a 10-dim latent vector back to a
    [b, 1, 28, 28] image (plain MLP, no transposed convolutions)."""

    def __init__(self):
        super().__init__()
        self.layer1 = nn.Linear(10, 64)        # latent -> hidden
        self.layer2 = nn.Linear(64, 256)       # hidden -> hidden
        self.layer3 = nn.Linear(256, 28 * 28)  # hidden -> flat image
        self.act1 = nn.ReLU()
        self.act2 = nn.Sigmoid()

    def forward(self, x):
        """Decode a [b, 10] latent batch into a [b, 1, 28, 28] image.

        The sigmoid output keeps every pixel in (0, 1).
        """
        hidden = self.act1(self.layer1(x))        # [b, 64]
        hidden = self.act1(self.layer2(hidden))   # [b, 256]
        flat = self.act2(self.layer3(hidden))     # [b, 784]
        return flat.reshape(-1, 1, 28, 28)


class VAE(nn.Module):
    """Variational autoencoder pairing the conv Encoder with the MLP Decoder."""

    def __init__(self):
        super().__init__()
        self.encoder = Encoder()
        self.decoder = Decoder()

    def forward(self, x):
        """Encode x, sample a latent via the reparameterization trick, decode.

        Returns (reconstruction, mean, std) so the caller can feed all three
        into ``vae_loss``.
        """
        mean, std = self.encoder(x)
        z = self.SampleMultiGaussian(mean, std)
        return self.decoder(z), mean, std

    def SampleMultiGaussian(self, mean, std):
        """Reparameterization trick: z = mean + std * eps with eps ~ N(0, I).

        ``torch.randn_like`` draws eps directly on mean's device and dtype,
        replacing the previous fragile lookup through the private
        ``_parameters["weight"]`` dict (which also allocated on CPU first).
        Gradients flow through mean and std but not through eps.
        """
        return mean + std * torch.randn_like(mean)

    def vae_loss(self, x, x_, mean, std):
        """SGVB objective: sum-reduced reconstruction MSE plus analytic KL.

        ``x`` is the input batch, ``x_`` the reconstruction, and (mean, std)
        the posterior parameters returned by the encoder (std > 0 assumed,
        as the encoder outputs exp of its raw std head).
        """
        # Functional form avoids constructing a new nn.MSELoss module per call.
        recon = nn.functional.mse_loss(x_, x, reduction='sum')
        # KL(N(mean, std^2) || N(0, I)) summed over batch and latent dims:
        # -0.5 * sum(1 + log(std^2) - mean^2 - std^2).
        kl = -0.5 * ((1 + 2 * torch.log(std)) - torch.pow(mean, 2) -
                     torch.pow(std, 2)).sum()
        return recon + kl
