import torch
from torch import nn
from torch.nn import functional as f

# Define the VAE encoder: map the input data to a set of means and variances
class VAE_Encoder(nn.Module):
    '''
    Fully connected encoder for 3x128x128 inputs (the image is flattened to
    3*128*128 features); produces a 16-dimensional latent distribution.
    Input dimension is currently fixed at 128; different hidden widths could
    be explored. (A convolutional U-Net-style reduction was considered and
    replaced by this MLP.)

    forward(input) -> (mean, log_var), both of shape (batch, 16).

    NOTE: the second output is the LOG-variance. The decoder's
    reparameterization computes sigma = exp(log_var * 0.5), so no positivity
    constraint belongs here. The previous softplus forced log_var > 0,
    i.e. variance > 1 always, which prevented the posterior from ever
    narrowing — hence it was removed.
    '''
    def __init__(self):
        super(VAE_Encoder, self).__init__()
        # MLP that progressively reduces the flattened image to 64*3 features.
        self.encode = nn.Sequential(
            nn.Linear(in_features=3 * 128 * 128, out_features=64 * 64),
            nn.ReLU(),
            nn.Linear(in_features=64 * 64, out_features=32 * 32),
            nn.ReLU(),
            nn.Linear(in_features=32 * 32, out_features=16 * 16),
            nn.ReLU(),
            nn.Linear(in_features=16 * 16, out_features=64 * 3),
            nn.ReLU()
        )
        # Two output heads: one for the mean, one for the log-variance.
        self.mean_net = nn.Linear(in_features=64 * 3, out_features=16)
        self.var_net = nn.Linear(in_features=64 * 3, out_features=16)

    def forward(self, input):
        # Flatten (batch, 3, 128, 128) -> (batch, 3*128*128) before the MLP;
        # the Sequential output is already 2-D, so no further reshape is needed.
        x = self.encode(input.view(input.size(0), -1))
        # Map to the mean of q(z|x).
        mean = self.mean_net(x)
        # Raw linear output, interpreted as log(variance) downstream; it may
        # legitimately be negative (variance < 1).
        log_var = self.var_net(x)
        return mean, log_var
    
class VAE_Decoder(nn.Module):
    '''
    Decoder: rebuilds the original data from a latent code.
    Takes the (mean, log-variance) pair produced by the encoder, draws a
    16-dim latent sample via the reparameterization trick, and maps it back
    to a (batch, 3, 128, 128) image. Be careful with the 3-D nature of the
    image. (A transposed-convolution U-Net-style upsampling path was
    considered and replaced by this MLP.)
    '''
    def __init__(self):
        super(VAE_Decoder, self).__init__()
        # Mirror of the encoder MLP: widen 64*3 features back up to the
        # flattened image size.
        self.decode = nn.Sequential(
            nn.Linear(64 * 3, 16 * 16),
            nn.ReLU(),
            nn.Linear(16 * 16, 32 * 32),
            nn.ReLU(),
            nn.Linear(32 * 32, 64 * 64),
            nn.ReLU(),
            nn.Linear(64 * 64, 3 * 128 * 128)
        )
        # Linear lift from the 16-dim latent up to the decoder's input width.
        self.fc = nn.Linear(in_features=16, out_features=64 * 3)

    def forward(self, mean, var, device='cuda' if torch.cuda.is_available() else 'cpu'):
        '''
        Sample a latent code from N(mean, exp(var)) and decode it.
        `var` is interpreted as the log-variance (see reparameterization).
        Returns a tensor of shape (batch, 3, 128, 128).
        '''
        # Build the latent sample from the mean/variance pair.
        latent = self.reparameterization(mu=mean, log_var=var, device=device)
        latent = latent.to(device)
        # Lift to the decoder width, run the MLP, then restore image shape.
        widened = self.fc(latent.view(latent.size(0), -1))
        flat = self.decode(widened)
        return flat.view(flat.size(0), 3, 128, 128)

    def reparameterization(self, mu, log_var, device='cuda' if torch.cuda.is_available() else 'cpu'):
        '''
        z = mu + sigma * eps with eps ~ N(0, I); keeps the sampling step
        differentiable with respect to mu and sigma.
        '''
        std = torch.exp(log_var * 0.5)
        std = std.to(device)
        noise = torch.randn_like(std)
        noise = noise.to(device)
        # "*" here is element-wise multiplication.
        return mu + std * noise
        
class VAE(nn.Module):
    '''
    Full variational autoencoder: the encoder produces a (mean, var) pair,
    the decoder samples a latent code from it and reconstructs the input.
    '''
    def __init__(self):
        super(VAE, self).__init__()
        self.encoder = VAE_Encoder()
        self.decoder = VAE_Decoder()

    def forward(self, input):
        '''Encode `input`, decode a sampled latent; returns (reconstruction, mean, var).'''
        mean, var = self.encoder(input)
        reconstruction = self.decoder(mean, var)
        return reconstruction, mean, var