# File header
# Author: Mr Su
# Created: 16/2/2021 4:05 PM
# File: model.py
# IDE: PyCharm

from torch import nn
import torch
from collections import OrderedDict

class Encoder(nn.Module):
    """Gaussian encoder: maps an input vector to (mu, log_sigma2) of q(z|x).

    Args:
        input_dim: dimensionality of the input vectors.
        inter_dims: width of the hidden layer. Defaults to 512 so that the
            default-constructed network matches the previous hard-coded
            600-512-400 architecture (the old default of 500 was dead: the
            constructor ignored all of its arguments).
        hid_dim: dimensionality of the latent code z.
    """

    def __init__(self, input_dim=600, inter_dims=512, hid_dim=400):
        super(Encoder, self).__init__()

        # Shared trunk feeding both Gaussian-parameter heads.
        self.encoder = nn.Sequential(
            nn.Linear(input_dim, inter_dims),
            nn.ReLU(),
        )

        # NOTE(review): this head is named log_sigma2 (log-variance) but the
        # VAE class downstream uses it as log-std (exp(log_std_z) as the
        # standard deviation) — confirm which parameterization is intended.
        self.mu_l = nn.Linear(inter_dims, hid_dim)
        self.log_sigma2_l = nn.Linear(inter_dims, hid_dim)

    def forward(self, x):
        """Return (mu, log_sigma2), each of shape (..., hid_dim)."""
        e = self.encoder(x)

        mu = self.mu_l(e)
        log_sigma2 = self.log_sigma2_l(e)

        return mu, log_sigma2

class Decoder(nn.Module):
    """Decoder: maps a latent code z to a reconstruction in [0, 1]^out_dim.

    Args:
        hid_dim: dimensionality of the latent code z.
        inter_dim: width of the hidden layer.
        out_dim: dimensionality of the reconstructed output.

    Defaults reproduce the original hard-coded 400-512-600 architecture.
    """

    def __init__(self, hid_dim=400, inter_dim=512, out_dim=600):
        super(Decoder, self).__init__()

        # Sigmoid bounds the reconstruction to [0, 1]; presumably the inputs
        # are normalized to that range — TODO confirm against the data loader.
        self.decoder = nn.Sequential(
            nn.Linear(hid_dim, inter_dim),
            nn.ReLU(),
            nn.Linear(inter_dim, out_dim),
            nn.Sigmoid()
        )

    def forward(self, z):
        """Return the reconstruction x_rec = decoder(z)."""
        x_rec = self.decoder(z)
        return x_rec

class VAE(nn.Module):
    """Variational auto-encoder built from the default Encoder and Decoder.

    Throughout this class the encoder's second output is treated as the log
    of the standard deviation: std = exp(log_std_z), var = exp(2 * log_std_z).
    """

    # Loss weights, identical to the original hard-coded constants.
    RECON_WEIGHT = 10
    KL_WEIGHT = 0.0001

    def __init__(self):
        super(VAE, self).__init__()
        self.encoder = Encoder()
        self.decoder = Decoder()
        self.Loss = nn.MSELoss()

    def _reparameterize(self, mu_z, log_std_z):
        # Reparameterization trick: z = mu + eps * std with eps ~ N(0, I),
        # shared by forward() and loss() (previously duplicated verbatim).
        return torch.randn_like(mu_z) * torch.exp(log_std_z) + mu_z

    def forward(self, x):
        """Encode x, sample z via the reparameterization trick, decode.

        Returns:
            (z, x_rec): the stochastic latent sample and its reconstruction.
        """
        mu_z, log_std_z = self.encoder(x)
        z = self._reparameterize(mu_z, log_std_z)
        x_rec = self.decoder(z)
        return z, x_rec

    def loss(self, x):
        """Compute the weighted ELBO terms for a batch x.

        Returns:
            (OrderedDict(loss, recon_loss, kl_loss), sigma): the total loss
            and its two components, plus sigma = mean(exp(log_std_z)),
            reported for monitoring only (e.g. watching posterior collapse).
        """
        mu_z, log_std_z = self.encoder(x)
        z = self._reparameterize(mu_z, log_std_z)

        # Mean predicted standard deviation, for logging.
        sigma = torch.mean(torch.exp(log_std_z))

        x_rec = self.decoder(z)
        recon_loss = self.RECON_WEIGHT * self.Loss(x, x_rec)

        # KL(q(z|x) || N(0, I)) per dimension under the log-std
        # parameterization: -log_std - 1/2 + (std^2 + mu^2) / 2,
        # summed over latent dims and averaged over the batch.
        kl_loss = -log_std_z - 0.5 + (torch.exp(2 * log_std_z) + mu_z ** 2) * 0.5
        kl_loss = self.KL_WEIGHT * kl_loss.sum(1).mean()

        return OrderedDict(loss=recon_loss + kl_loss, recon_loss=recon_loss,
                           kl_loss=kl_loss), sigma

    def sample(self, datasets):
        """Deterministically embed datasets: return the posterior mean mu_z.

        Uses the mean instead of a stochastic sample so repeated calls give
        stable embeddings.
        """
        # Compare dtypes with != rather than `is not`: equality is the
        # documented comparison for torch.dtype; identity is incidental.
        if datasets.dtype != torch.float32:
            datasets = datasets.detach().float()

        mu_z, log_std_z = self.encoder(datasets)
        z = mu_z
        return z