import time
from torch.utils.data import Dataset
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.normal import Normal
from torch.distributions import kl_divergence


class EmWaveLoader(Dataset):
    """Dataset over an EM-wave sample matrix stored in a ``.npy`` file.

    Each row of the saved array is one sample; the whole array is loaded
    eagerly and kept in memory as a float32 tensor.
    """

    def __init__(self, data_path):
        # Keep the path for reference and load the full matrix up front,
        # converting to a float32 tensor (torch.Tensor always casts).
        self.data_path = data_path
        self.data = torch.Tensor(np.load(data_path))

    def __len__(self):
        # Dataset size = number of rows.
        return self.data.shape[0]

    def __getitem__(self, index):
        # One sample row.
        return self.data[index, :]


def weights_init(m):
    """Initialize the weights of a module.

    Linear layers get Xavier-uniform weights and zero bias; everything
    else is left untouched.

    :param m: a submodule, as passed in by ``nn.Module.apply``
    :return: None
    """
    layer_name = type(m).__name__
    if 'Linear' not in layer_name:
        return
    try:
        nn.init.xavier_uniform_(m.weight.data)
        m.bias.data.fill_(0)
    except AttributeError:
        # e.g. a Linear built with bias=False has m.bias is None
        print("Skipping initialization of ", layer_name)


class VAE(nn.Module):
    """Fully-connected variational autoencoder for 1001-dim EM-wave samples.

    The encoder maps 1001 -> 200 -> 20 features; the 20 outputs are the
    concatenation of the 10-dim mean and 10-dim log-variance of the
    approximate posterior q(z|x).  The decoder maps a 10-dim latent
    sample back to 1001 features.

    NOTE(review): the constructor arguments ``input_dim``, ``dim`` and
    ``z_dim`` are currently unused — the layer sizes are hard-coded.
    They are kept so existing call sites keep working.
    """

    def __init__(self, input_dim, dim, z_dim):
        super().__init__()
        # Encoder: 1001 -> 200 -> 20 (= 2 * latent dim: mu ++ log_var).
        self.encoder = nn.Sequential(
            nn.Linear(in_features=1001, out_features=200),
            nn.ReLU(),
            nn.Linear(200, 20)
        )

        # Decoder: 10 (latent) -> 1001.  The leading ReLU clamps negative
        # latent samples to zero before the linear map.
        self.decoder = nn.Sequential(
            nn.ReLU(),
            nn.Linear(10, 1001)
        )

        self.apply(self._init_linear)

    @staticmethod
    def _init_linear(m):
        """Xavier-init Linear layers, zero bias (mirrors module-level weights_init)."""
        if 'Linear' in m.__class__.__name__:
            try:
                nn.init.xavier_uniform_(m.weight.data)
                m.bias.data.fill_(0)
            except AttributeError:
                print("Skipping initialization of ", m.__class__.__name__)

    def forward(self, x):
        """Encode, sample via reparameterization, decode.

        :param x: tensor whose last dimension is 1001 (with or without
            a leading batch dimension)
        :return: (reconstruction, kl_divergence) — the reconstruction has
            the same shape as ``x``; the KL term is a scalar.
        """
        # Split the encoder output into mean / log-variance along the
        # LAST dim.  BUGFIX: the original used .chunk(2), which splits
        # along dim 0 — for batched input that cuts the batch in half
        # and crashes the decoder.
        mu, log_var = self.encoder(x).chunk(2, dim=-1)

        # Approximate posterior q(z|x); exp(0.5 * log_var) = std.
        q_z_x = Normal(mu, log_var.mul(.5).exp())

        # Standard-normal prior p(z).
        p_z = Normal(torch.zeros_like(mu), torch.ones_like(log_var))

        # Per-sample KL: sum over latent dims, then mean over the batch.
        # BUGFIX: the original summed over dim 0 (the batch dim for
        # batched input), mis-scaling the KL relative to the per-sample
        # reconstruction loss.  For unbatched 1-D input this is identical
        # to the old behavior.
        kl_div = kl_divergence(q_z_x, p_z).sum(-1).mean()

        # Reparameterized sample (rsample keeps gradients flowing).
        x_reconstruct = self.decoder(q_z_x.rsample())

        return x_reconstruct, kl_div


def train(vae, dataloader):
    """Run one training epoch of the VAE.

    Loss per batch = summed MSE reconstruction error normalised by
    ``x.size(0)`` plus the KL divergence returned by the model.

    :param vae: model whose forward returns (reconstruction, kl_divergence)
    :param dataloader: iterable of input tensors (Dataset or DataLoader)
    :return: list of per-batch loss values (new; previously discarded)
    """
    PRINT_INTERVAL = 1
    train_loss = []
    opt = torch.optim.Adam(vae.parameters(), lr=0.0005, amsgrad=True)
    vae.train()
    for batch_idx, x in enumerate(dataloader):
        start_time = time.time()

        x_reconstruct, kl_d = vae(x)
        # reduction='sum' replaces the deprecated size_average=False;
        # dividing by x.size(0) makes the term per-sample, matching kl_d.
        loss_recons = F.mse_loss(x_reconstruct, x, reduction='sum') / x.size(0)
        loss = loss_recons + kl_d

        opt.zero_grad()   # reset accumulated gradients
        loss.backward()   # backprop
        opt.step()        # parameter update

        train_loss.append(loss.item())
        if (batch_idx + 1) % PRINT_INTERVAL == 0:
            # len(dataloader) works for both a Dataset and a DataLoader
            # (the original len(dataloader.data) only worked for the former).
            print('\tIter [{}/{} ({:.0f}%)]\tLoss: {} Time: {:5.3f} ms/batch'.format(
                batch_idx + 1, len(dataloader),
                100 * PRINT_INTERVAL * batch_idx / len(dataloader),
                np.asarray(train_loss)[-PRINT_INTERVAL:].mean(0),
                1000 * (time.time() - start_time)
            ))
    return train_loss


def test(model, test_dataloader):
    """Compute the validation loss of the VAE.

    :param model: VAE model whose forward returns (reconstruction, kl_divergence)
    :param test_dataloader: iterable of validation input tensors
        (x-only samples, no labels)
    :return: mean validation loss over all batches
    """
    start_time = time.time()
    val_loss = []
    model.eval()
    with torch.no_grad():
        for batch_idx, x_test in enumerate(test_dataloader):
            x_reconstruct, kl_d = model(x_test)

            # reduction='sum' replaces the deprecated size_average=False;
            # dividing by x_test.size(0) makes it per-sample, matching kl_d.
            loss_recons = F.mse_loss(x_reconstruct, x_test, reduction='sum') / x_test.size(0)

            # total loss = reconstruction term + KL divergence
            loss = loss_recons + kl_d
            val_loss.append(loss.item())

    print('\nValidation Completed!\tLoss: {:5.4f} Time: {:5.3f} s'.format(
        np.asarray(val_loss).mean(0),
        time.time() - start_time
    ))
    # Return the mean loss.
    return np.asarray(val_loss).mean(0)


def generate_reconstructions(model, dataloader):
    """Reconstruct the first up-to-32 samples of the first batch.

    :param model: VAE whose forward returns (reconstruction, kl_divergence)
    :param dataloader: iterable yielding input tensors
    :return: tensor of the inputs concatenated with their reconstructions
        along dim 0 (new; the original computed it and discarded it)
    """
    model.eval()
    with torch.no_grad():  # inference only — no autograd bookkeeping
        x = next(iter(dataloader))
        # Run on whatever device the model lives on (the original
        # hard-coded .cuda(), which crashed on CPU-only machines).
        device = next(model.parameters()).device
        x = x[:32].to(device)
        x_tilde, kl_div = model(x)
    return torch.cat([x, x_tilde], 0)


def generate_samples(model, zdim):
    """Decode 64 random latent codes from a trained model.

    :param model: trained model exposing a ``decoder`` submodule
    :param zdim: latent dimensionality
    :return: decoded samples (new; the original computed and discarded them)
    """
    model.eval()
    # Sample on the model's device (the original hard-coded .cuda(),
    # which crashed on CPU-only machines).
    device = next(model.parameters()).device
    with torch.no_grad():  # inference only
        z_e_x = torch.randn(64, zdim, 1, 1, device=device)
        x_tilde = model.decoder(z_e_x)
    return x_tilde
