import os
import time
import gzip
import pickle
import numpy as np
import matplotlib.pyplot as plt

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset

from VAE import VAE


def gpu_setup(use_gpu, gpu_id):
    """Select the compute device and pin the visible CUDA device.

    Args:
        use_gpu: whether a CUDA device should be used when available.
        gpu_id: GPU index exposed via CUDA_VISIBLE_DEVICES.

    Returns:
        torch.device: "cuda" when requested and available, else "cpu".
    """
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

    if use_gpu and torch.cuda.is_available():
        print('cuda available with GPU:', torch.cuda.get_device_name(0))
        return torch.device("cuda")
    print('cuda not available')
    return torch.device("cpu")


def load_data():
    """Load the gzipped MNIST pickle and return train/valid splits as tensors.

    Returns:
        Tuple of torch tensors (x_train, y_train, x_valid, y_valid).
        The third (test) split stored in the pickle is discarded.
    """
    data_path = "../data/mnist.pkl.gz"

    # NOTE(review): pickle.load on an untrusted file is unsafe; this assumes
    # the local MNIST archive is trusted.
    with gzip.open(data_path, "rb") as f:
        train_split, valid_split, _ = pickle.load(f, encoding="latin-1")

    x_train, y_train = train_split
    x_valid, y_valid = valid_split
    return (torch.tensor(x_train), torch.tensor(y_train),
            torch.tensor(x_valid), torch.tensor(y_valid))


def train_epoch(model, optimizer, device, data_loader):
    """Run one training epoch.

    Args:
        model: VAE-style module exposing ``forward(x) -> (x_hat, mu, log_var)``
            and ``loss(x_hat, x, mu, log_var, loss_type) -> (loss, recons, kld)``.
        optimizer: optimizer stepping over ``model.parameters()``.
        device: torch.device batches are moved to.
        data_loader: yields ``(x, label)`` batches; labels are ignored.

    Returns:
        Tuple of (mean loss, mean KLD, mean reconstruction loss,
        per-batch KLD list, optimizer). The optimizer is returned unchanged
        to preserve the original calling convention.
    """
    model.train()
    num_batches = 0  # renamed: the original used `iter`, shadowing the builtin
    epoch_loss = 0.0
    epoch_kld = 0.0
    epoch_recons_loss = 0.0
    epoch_kld_list = []  # per-batch KLD, to inspect how the KL term evolves
    for num_batches, (x, _) in enumerate(data_loader, start=1):
        x = x.to(device)
        # Call the module itself (not .forward()) so torch hooks still run.
        x_hat, mu, log_var = model(x)
        loss, recons_loss, kld = model.loss(x_hat, x, mu, log_var, loss_type='MSE')
        # Backward pass
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # .item() already detaches; the original .detach().item() was redundant.
        epoch_loss += loss.item()
        kld_value = kld.item()
        epoch_kld += kld_value
        epoch_kld_list.append(kld_value)
        epoch_recons_loss += recons_loss.item()
    # Guard against an empty loader (original relied on iter starting at 0).
    num_batches = max(num_batches, 1)
    return (epoch_loss / num_batches, epoch_kld / num_batches,
            epoch_recons_loss / num_batches, epoch_kld_list, optimizer)


def val_epoch(model, device, data_loader):
    """Run one validation epoch with gradients disabled.

    Args:
        model: VAE-style module (see train_epoch for the expected interface).
        device: torch.device batches are moved to.
        data_loader: yields ``(x, label)`` batches; labels are ignored.

    Returns:
        Tuple of (mean loss, mean KLD, mean reconstruction loss).
    """
    model.eval()
    num_batches = 0  # renamed: the original used `iter`, shadowing the builtin
    epoch_loss = 0.0
    epoch_kld = 0.0
    epoch_recons_loss = 0.0
    with torch.no_grad():
        for num_batches, (x, _) in enumerate(data_loader, start=1):
            x = x.to(device)
            # Call the module itself (not .forward()) so torch hooks still run.
            x_hat, mu, log_var = model(x)
            loss, recons_loss, kld = model.loss(x_hat, x, mu, log_var, loss_type='MSE')
            # .item() already detaches; the original .detach().item() was redundant.
            epoch_loss += loss.item()
            epoch_kld += kld.item()
            epoch_recons_loss += recons_loss.item()
    # Guard against an empty loader (original relied on iter starting at 0).
    num_batches = max(num_batches, 1)
    return (epoch_loss / num_batches, epoch_kld / num_batches,
            epoch_recons_loss / num_batches)


def train_VAE(config):
    """Train a VAE on MNIST using the settings in *config*.

    Args:
        config: object exposing batch_size, num_epochs, learning_rate, device.

    Returns:
        Tuple of (trained model, list of per-iteration training KLD values).
    """
    # Build train / validation loaders
    x_train, y_train, x_valid, y_valid = load_data()
    train_loader = DataLoader(TensorDataset(x_train, y_train),
                              batch_size=config.batch_size, shuffle=True)
    val_loader = DataLoader(TensorDataset(x_valid, y_valid),
                            batch_size=config.batch_size, shuffle=False)

    model = VAE().to(config.device)
    optimizer = optim.Adam(model.parameters(), lr=config.learning_rate)

    kld_list = []  # KLD per training iteration, kept for later plotting
    for epoch in range(config.num_epochs):
        start = time.time()

        train_loss, train_kld, train_recons_loss, train_kld_list, optimizer = \
            train_epoch(model, optimizer, config.device, train_loader)
        val_loss, val_kld, val_recons_loss = \
            val_epoch(model, config.device, val_loader)

        kld_list.extend(train_kld_list)
        epoch_time = time.time() - start

        # Per-epoch progress report
        print('*' * 80)
        print(f"epoch:{epoch + 1:d}\n"
              f"train_loss:{train_loss:.5f}  train_recons_loss:{train_recons_loss:.8f}  train_KLD:{train_kld:.8f}\n"
              f"val_loss:  {val_loss:.5f}  val_recons_loss:  {val_recons_loss:.8f}  val_KLD:  {val_kld:.8f}\n"
              f"epoch_time:{epoch_time:.5f} s")
    return model, kld_list


def VAE_test(model, device):
    """Reconstruct one validation sample and show input vs. reconstruction."""
    index = 0  # which validation sample to visualize
    _, _, x_valid, y_valid = load_data()
    x = x_valid[index].to(device).view(1, -1)  # add batch_size dimension
    with torch.no_grad():
        x_hat = model.reconstruct(x)
    x_hat = x_hat.squeeze()  # drop batch_size dimension

    original = x.detach().cpu().numpy()
    reconstruction = x_hat.detach().cpu().numpy()
    fig = plt.figure()
    fig.add_subplot(1, 2, 1)
    plt.imshow(original.reshape((28, 28)), cmap="gray")
    fig.add_subplot(1, 2, 2)
    plt.imshow(reconstruction.reshape((28, 28)), cmap="gray")
    plt.show()


def plt_kld(kld_list):
    """Plot how the KL divergence evolved over training iterations."""
    plt.figure()
    plt.plot(range(len(kld_list)), kld_list)
    plt.title('KLD')
    plt.xlabel('iter')
    plt.ylabel('KLD')
    plt.show()


def sample_test(model, device):
    """Sample latent codes from the prior and display each decoded image."""
    num_sample = 3
    with torch.no_grad():
        generated = model.sample(num_sample, device=device)
    generated = generated.detach().cpu().numpy()
    for image in generated:
        plt.imshow(image.reshape((28, 28)), cmap="gray")
        plt.show()


def reconstruct_show(model, device):
    """Reconstruct several validation digits and plot input/output pairs."""
    _, _, x_valid, y_valid = load_data()
    batch = x_valid[10:60].to(device)
    with torch.no_grad():
        reconstructed = model.reconstruct(batch)
    inputs = batch.detach().cpu().numpy()
    outputs = reconstructed.detach().cpu().numpy()

    plt.figure(figsize=(8, 12))
    for row in range(5):
        # Left column: the original digit
        plt.subplot(5, 2, 2 * row + 1)
        plt.imshow(inputs[row].reshape(28, 28), vmin=0, vmax=1, cmap="gray")
        plt.title("Test input")
        plt.colorbar()
        # Right column: its reconstruction
        plt.subplot(5, 2, 2 * row + 2)
        plt.imshow(outputs[row].reshape(28, 28), vmin=0, vmax=1, cmap="gray")
        plt.title("Reconstruction")
        plt.colorbar()
    plt.tight_layout()
    plt.show()


def classifier_show(model, device):
    """Project the validation set into latent space and color by class label."""
    _, _, x_valid, y_valid = load_data()
    with torch.no_grad():
        latent = model.transform(x_valid.to(device))
    latent = latent.detach().cpu().numpy()
    labels = y_valid.detach().cpu().numpy()

    plt.figure(figsize=(8, 6))
    # Assumes a 2-D latent space: first two coordinates are plotted directly.
    plt.scatter(latent[:, 0], latent[:, 1], c=labels)
    plt.colorbar()
    plt.grid()
    plt.show()


def latent_show(model, device):
    """Decode a uniform 2-D grid of latent codes into one large image canvas."""
    nx = ny = 20
    # Both latent axes use the same ticks, so a single grid suffices
    # (the original built two identical linspaces).
    grid = np.linspace(-3, 3, nx)

    canvas = np.empty((28 * ny, 28 * nx))
    for row, z2 in enumerate(grid):
        for col, z1 in enumerate(grid):
            z = torch.Tensor([[z1, z2]]).to(device)
            with torch.no_grad():
                decoded = model.generate(z)
            decoded = decoded.detach().cpu().numpy()
            # Fill rows bottom-up so increasing z2 moves upward on the canvas.
            canvas[(nx - row - 1) * 28:(nx - row) * 28,
                   col * 28:(col + 1) * 28] = decoded[0].reshape(28, 28)

    plt.figure(figsize=(8, 10))
    plt.imshow(canvas, origin="upper", cmap="gray")
    plt.tight_layout()
    plt.show()


class Config(object):
    """Hyper-parameters and device selection for VAE training."""

    def __init__(self):
        # Training hyper-parameters
        self.batch_size = 64
        self.num_epochs = 10
        self.learning_rate = 0.001
        # Resolve the torch device once, up front (GPU 0 when available).
        self.device = gpu_setup(True, 0)


if __name__ == '__main__':
    # Train the VAE, then run every diagnostic/visualization in turn.
    config = Config()
    model, kld_list = train_VAE(config)
    plt_kld(kld_list)                       # KL-divergence curve over iterations
    VAE_test(model, config.device)          # single-sample reconstruction
    sample_test(model, config.device)       # images sampled from the prior
    reconstruct_show(model, config.device)  # side-by-side reconstructions
    classifier_show(model, config.device)   # latent scatter colored by label
    latent_show(model, config.device)       # decoded latent-space grid
