from torch import nn
import torch
from torch import optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import transforms, datasets
from torchvision.utils import save_image
import matplotlib.pyplot as plt
import os
import shutil
import numpy as np

from artear2_3.dataset import ParamsDataset


class VAE(nn.Module):
    """Fully-connected variational autoencoder.

    The encoder maps a flattened input to the mean and log-variance of a
    diagonal Gaussian posterior; the decoder maps a latent sample back to
    input space through a sigmoid, so reconstructions lie in [0, 1].
    """

    def __init__(self, input_dim=784, h_dim=400, z_dim=20):
        # Initialize nn.Module state before registering submodules.
        super(VAE, self).__init__()

        self.input_dim = input_dim
        self.h_dim = h_dim
        self.z_dim = z_dim

        # Encoder: [b, input_dim] -> hidden -> two heads of size z_dim.
        self.fc1 = nn.Linear(input_dim, h_dim)
        self.fc2 = nn.Linear(h_dim, z_dim)   # posterior mean head
        self.fc3 = nn.Linear(h_dim, z_dim)   # posterior log-variance head

        # Decoder: [b, z_dim] -> hidden -> [b, input_dim].
        self.fc4 = nn.Linear(z_dim, h_dim)
        self.fc5 = nn.Linear(h_dim, input_dim)

    def forward(self, x):
        """Encode `x`, sample a latent vector, and decode a reconstruction.

        :param x: input batch; flattened to [b, input_dim] before encoding
        :return: tuple (x_hat, mu, log_var)
        """
        n = x.shape[0]
        # view() only reinterprets the shape; the new tensor shares
        # storage with `x`, no data is copied.
        flat = x.view(n, self.input_dim)

        mu, log_var = self.encode(flat)
        z = self.reparameterization(mu, log_var)
        x_hat = self.decode(z).view(n, self.input_dim)
        return x_hat, mu, log_var

    def encode(self, x):
        """Map a flattened batch to posterior parameters.

        :param x: tensor of shape [b, input_dim]
        :return: (mu, log_var), each of shape [b, z_dim]
        """
        hidden = F.relu(self.fc1(x))
        return self.fc2(hidden), self.fc3(hidden)

    def reparameterization(self, mu, log_var):
        """Sample z = mu + sigma * eps with eps ~ N(0, I).

        The reparameterization trick keeps sampling differentiable
        with respect to `mu` and `log_var`.

        :return: sampled latent of the same shape as `mu`
        """
        sigma = torch.exp(0.5 * log_var)
        # Element-wise product: each latent dimension is scaled independently.
        return mu + sigma * torch.randn_like(sigma)

    def decode(self, z):
        """Map a latent sample back to input space.

        :param z: latent batch of shape [b, z_dim]
        :return: reconstruction in [0, 1], shape [b, input_dim]
        """
        hidden = F.relu(self.fc4(z))
        # Sigmoid keeps outputs in [0, 1]; ReLU would not bound them above.
        return torch.sigmoid(self.fc5(hidden))



# plt.style.use("ggplot")

# Select the device the model runs on: CUDA when available, else CPU.
cuda = torch.cuda.is_available()
device = torch.device("cuda" if cuda else "cpu")


def loss_function(x_hat, x, mu, log_var):
    """Negative ELBO for a Bernoulli decoder with a N(0, I) prior.

    :param x_hat: reconstruction in [0, 1], same shape as `x`
    :param x: target batch (values in [0, 1], treated as binary targets)
    :param mu: posterior mean, shape [b, z_dim]
    :param log_var: posterior log-variance, shape [b, z_dim]
    :return: (total loss, reconstruction term, KL term), each summed
             over the whole batch
    """
    # 1. Reconstruction term: per-element binary cross-entropy, summed.
    BCE = F.binary_cross_entropy(x_hat, x, reduction='sum')

    # 2. KL(Q(z|x) || P(z)) in closed form — both distributions are
    # Gaussian and the posterior covariance is assumed diagonal.
    KLD = 0.5 * torch.sum(log_var.exp() + mu.pow(2) - 1. - log_var)

    # 3. Total loss is the sum of both terms.
    return BCE + KLD, BCE, KLD


def save_checkpoint(state, is_best, outdir):
    """Write `state` to <outdir>/checkpoint.pth, mirroring the best model.

    Creates `outdir` (and parents) when it does not exist. When `is_best`
    is true the freshly written checkpoint is also copied to
    <outdir>/model_best.pth.

    :param state: picklable object (typically a dict of model/optimizer state)
    :param is_best: whether this checkpoint is the best seen so far
    :param outdir: directory the checkpoint files are written into
    """
    if not os.path.exists(outdir):
        os.makedirs(outdir)

    latest = os.path.join(outdir, 'checkpoint.pth')
    torch.save(state, latest)
    if is_best:
        # Keep a stable copy of the best-performing checkpoint.
        shutil.copyfile(latest, os.path.join(outdir, 'model_best.pth'))


"""def test(model, optimizer, mnist_test, epoch, best_test_loss):
    test_avg_loss = 0.0
    with torch.no_grad():  # 这一部分不计算梯度，也就是不放入计算图中去
        '''测试测试集中的数据'''
        # 计算所有batch的损失函数的和
        for test_batch_index, (test_x, _) in enumerate(mnist_test):
            test_x = test_x.to(device)
            # 前向传播
            test_x_hat, test_mu, test_log_var = model(test_x)
            # 损害函数值
            test_loss, test_BCE, test_KLD = loss_function(test_x_hat, test_x, test_mu, test_log_var)
            test_avg_loss += test_loss

        # 对和求平均，得到每一张图片的平均损失
        test_avg_loss /= len(mnist_test.dataset)

        '''测试随机生成的隐变量'''
        # 随机从隐变量的分布中取隐变量
        z = torch.randn(batch_size, z_dim).to(device)  # 每一行是一个隐变量，总共有batch_size行
        # 对隐变量重构
        random_res = model.decode(z).view(-1, 1, 28, 28)
        # 保存重构结果
        save_image(random_res, './%s/random_sampled-%d.png' % (result_dir, epoch + 1))

        '''保存目前训练好的模型'''
        # 保存模型
        is_best = test_avg_loss < best_test_loss
        best_test_loss = min(test_avg_loss, best_test_loss)
        save_checkpoint({
            'epoch': epoch,  # 迭代次数
            'best_test_loss': best_test_loss,  # 目前最佳的损失函数值
            'state_dict': model.state_dict(),  # 当前训练过的模型的参数
            'optimizer': optimizer.state_dict(),
        }, is_best, save_dir)

        return best_test_loss"""


def main():
    """Train the VAE on ParamsDataset and return the training history.

    :return: (loss_epoch, model) where loss_epoch[i] is the average
             per-sample loss of epoch i and model is the trained VAE.
    """
    result_dir = './VAEResult'
    z_dim = 100
    lr = 1e-3
    epochs = 400

    cwd = 'C:/dev_spa/DMuse/202202b3'

    config = {
        'dataset_config': {
            'train_sample_dir': cwd,
            'test_sample_dir': cwd,
            'train_label_dir': cwd,
            'test_label_dir': cwd,
            'compress_rate': 30.,
            'n_spaces': 30,
        },
        'batch_size': 128,
        'shuffle': True,
    }

    # Step 1: load the data.
    dataset = ParamsDataset(mode='train', **config['dataset_config'])
    dataloader = DataLoader(
        dataset=dataset,
        batch_size=config['batch_size'],
        # Honor the config value instead of hard-coding shuffle=True.
        shuffle=config['shuffle'],
    )

    # Peek at one batch to report the sample shape.
    x = next(iter(dataloader))[0]
    print(' img : ', x.shape)

    # Step 2: build the model (moved to the selected device) and optimizer.
    model = VAE(input_dim=dataset.n_params, z_dim=z_dim).to(device)
    print('The structure of our model is shown below: \n')
    print(model)
    optimizer = optim.Adam(model.parameters(), lr=lr)

    # Step 3: resume from a checkpoint — currently disabled.
    # TODO(review): restore checkpoint resuming via save_checkpoint()'s
    # 'checkpoint.pth' output if training runs need to be interruptible.
    start_epoch = 0

    if not os.path.exists(result_dir):
        os.makedirs(result_dir)

    # Step 4: training loop.
    n_batches = len(dataloader)
    loss_epoch = []
    for epoch in range(start_epoch, epochs):

        loss_batch = []
        for batch_index, (x,) in enumerate(dataloader):
            # Deploy the batch on the same device as the model.
            x = x.to(device)

            # Forward pass: model(x) invokes VAE.forward.
            x_hat, mu, log_var = model(x)
            loss, BCE, KLD = loss_function(x_hat, x, mu, log_var)
            loss_batch.append(loss.item())

            # Backward pass: clear stale gradients, backpropagate, update.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Print statistics every 5 batches.
            # BUGFIX: normalize by the actual batch size x.size(0). The old
            # code divided by an unused local `batch_size = 256` while the
            # DataLoader used config['batch_size'] = 128, so both the
            # reported per-sample losses and the batch count were wrong
            # (and the last, possibly smaller, batch was mis-normalized).
            if batch_index % 5 == 0:
                n = x.size(0)
                print('Epoch [{}/{}], Batch [{}/{}] : Total-loss = {:.4f}, BCE-Loss = {:.4f}, KLD-loss = {:.4f}'
                      .format(epoch + 1, epochs, batch_index + 1, n_batches,
                              loss.item() / n, BCE.item() / n, KLD.item() / n))

        # Average per-sample loss over the whole epoch.
        loss_epoch.append(np.sum(loss_batch) / len(dataloader.dataset))

    return loss_epoch, model


# Script entry point: train the model, persist its weights, plot the loss.
if __name__ == '__main__':
    history, trained_model = main()

    # Save the trained parameters for later reuse.
    torch.save(trained_model.state_dict(), './vae.pth')

    # Visualize how the per-epoch training loss evolved.
    plt.plot(history)
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.show()

