import torch
import torch.optim as optim
import torch.nn.functional as F
from matplotlib import pyplot as plt

from data_manager import DataManager
from models_manger import VAEModel

torch.manual_seed(0)
"""
控制随机初始化
    影响 权重初始化（如 nn.Linear、nn.Conv2d 的初始参数）。
    影响 数据打乱（如 DataLoader(shuffle=True)）。
    影响 Dropout、随机增强（如 transforms.RandomCrop）等涉及随机性的操作。"""

num_samples = 12


# VAE training loss: reconstruction error plus KL divergence.
def loss_function(recon_x, x, mu, log_var):
    """Return the (summed) VAE loss for a batch.

    Args:
        recon_x: decoder output, shape (batch, 784), values in [0, 1].
        x: input images; flattened here to (batch, 28*28).
        mu: mean of the approximate posterior q(z|x).
        log_var: log-variance of the approximate posterior q(z|x).

    Returns:
        Scalar tensor: binary cross-entropy reconstruction term plus the
        KL divergence between q(z|x) and the standard normal prior.
    """
    # Reconstruction term: pixel-wise binary cross-entropy, summed over the batch.
    flat_target = x.view(-1, 28 * 28)
    recon_loss = F.binary_cross_entropy(recon_x, flat_target, reduction='sum')
    # KL divergence between two Gaussians in closed form; see Appendix B of
    # Kingma & Welling, "Auto-Encoding Variational Bayes", ICLR 2014,
    # https://arxiv.org/abs/1312.6114:
    #   KL = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    # written here in the equivalent form 0.5 * sum(mu^2 + sigma^2 - log(sigma^2) - 1).
    kl_div = 0.5 * torch.sum(mu.pow(2) + log_var.exp() - log_var - 1)
    # Total loss is the sum of both terms.
    return recon_loss + kl_div


def test():
    """Train a VAE on MNIST and visualize decoded samples after every epoch.

    Builds a VAEModel (784 -> 512 -> 256 -> 2-dim latent), trains it with Adam
    for 20 epochs using `loss_function`, prints the per-sample average loss
    after each epoch, then samples `num_samples` latent vectors from N(0, I),
    decodes them, and shows the resulting 28x28 images with matplotlib.

    NOTE(review): assumes DataManager yields (image, label) batches with image
    values in [0, 1] (required by binary_cross_entropy) — confirm upstream.
    """
    data_manager = DataManager("mnist", 64)
    model = VAEModel(28 * 28, 512, 256, 2)
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    train_loader, test_loader = data_manager.get_data_loader()

    # Number of training epochs.
    epochs = 20
    # Fix: fall back to CPU when CUDA is unavailable. The original hard-coded
    # "cuda:0", which raises on CPU-only machines.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.to(device)
    for epoch in range(epochs):
        # Training phase.
        model.train()
        train_loss = 0
        for batch_idx, (data, _) in enumerate(train_loader):
            data = data.to(device)
            optimizer.zero_grad()
            # Forward pass: reconstruction, posterior parameters, latent sample.
            recon_batch, mu, log_var, z = model(data)
            loss = loss_function(recon_batch, data, mu, log_var)
            # Backward pass, accumulate the loss value, update parameters.
            loss.backward()
            train_loss += loss.item()
            optimizer.step()
        # Report the average loss per training sample.
        print('====>Epoch: {} Average loss: {:.4f}'.format(epoch, train_loss / len(train_loader.dataset)))
        # Evaluation phase: generate new samples.
        model.eval()
        with torch.no_grad():
            # Sample latent vectors from the standard normal prior and decode
            # them into new images.
            z = torch.randn(num_samples, 2).to(device)
            sample = model.decoder(z).cpu()
            images = sample.view(num_samples, 28, 28).numpy()
            # Visualize the generated samples in a single row.
            fig, axs = plt.subplots(1, num_samples, figsize=(10, 10))
            for i in range(num_samples):
                axs[i].imshow(images[i], cmap="gray")
                axs[i].axis("off")
            plt.show()


if __name__ == '__main__':
    # Script entry point: run the VAE training/visualization loop.
    test()