import torch
import torchvision
from torch.utils import data
from torchvision import transforms
import matplotlib.pyplot as plt


def get_dataloader_workers():
    """Return the number of worker subprocesses for DataLoader.

    0 means all data loading happens in the main process — the safest
    default (avoids multiprocessing issues on Windows / in notebooks).
    """
    return 0


def load_data_mnist(batch_size):
    """Download MNIST (if not cached under ``../data``) and build loaders.

    Args:
        batch_size: mini-batch size for both loaders.

    Returns:
        A ``(train_loader, test_loader)`` tuple of ``DataLoader`` objects;
        the training loader shuffles, the test loader does not.
    """
    to_tensor = transforms.ToTensor()

    train_set = torchvision.datasets.MNIST(
        root="../data", train=True, transform=to_tensor, download=True)
    test_set = torchvision.datasets.MNIST(
        root="../data", train=False, transform=to_tensor, download=True)

    workers = get_dataloader_workers()
    train_loader = data.DataLoader(
        train_set, batch_size, shuffle=True, num_workers=workers)
    test_loader = data.DataLoader(
        test_set, batch_size, shuffle=False, num_workers=workers)
    return (train_loader, test_loader)


# class Accumulator:
#     '''
#         构造累加器
#     '''

#     def __init__(self, n):
#         self.data = [0.0] * n

#     def add(self, *args):
#         self.data = [a + float(b) for a, b in zip(self.data, args)]

#     def reset(self):
#         self.data = [0.0] * len(self.data)

#     def __getitem__(self, idx):
#         return self.data[idx]


# def evaluate_loss(net, data_iter, loss):
#     '''
#         计算损失
#     '''
#     net.eval()
#     metric = Accumulator(2)

#     for X, _ in data_iter:
#         h, X_ = net(X)
#         l = loss(X, X_, h)
#         metric.add(float(l), 1)
#     return metric[0] / metric[1]


# def train_epoch(net, train_iter, loss, updater):
#     '''
#         训练单个epoch
#     '''
#     net.train()
#     metric = Accumulator(2)
#     for X, _ in train_iter:
#         h, X_ = net(X)
#         l = loss(X, X_, h)
#         updater.zero_grad()
#         l.backward()
#         updater.step()

#         metric.add(float(l), 1)

#     return metric[0] / metric[1]


# def train(net, train_iter, test_iter, loss, num_epochs, updater):
#     '''
#         训练函数
#     '''
#     for epoch in range(num_epochs):
#         test_loss = evaluate_loss(net, test_iter, loss)
#         train_loss = train_epoch(net, train_iter, loss, updater)
#         print(
#             f'Epoch: {epoch+1}\tTrain_loss: {train_loss:.2f}\tTest_loss: {test_loss:.2f}'
#         )


def show_images(net, dataset, index, scale=1.5):
    """Plot 5 originals (top row) against their VAE reconstructions (bottom row).

    Args:
        net: model whose call returns ``(_, mean, std)`` for a
            ``(1, 1, 28, 28)`` input, and whose ``net.decoder(z)`` maps a
            latent sample back to a 28*28 image.
            NOTE(review): assumes all tensors live on CPU — confirm, since
            ``torch.randn(std.shape)`` is allocated without a device argument.
        dataset: indexable dataset yielding ``(image_tensor, label)`` pairs
            (e.g. the torchvision MNIST dataset).
        index: starting index; samples ``index .. index + 4`` are shown.
        scale: figure-size multiplier.
    """
    figsize = (5 * scale, 2 * scale)
    _, axes = plt.subplots(2, 5, figsize=figsize)
    axes = axes.flatten()

    # Inference only: disable autograd so no graph is built (the original
    # relied on a single .detach() while still tracking every other op).
    with torch.no_grad():
        for i in range(5):
            orig = dataset[index + i][0].reshape(1, 1, 28, 28)
            _, mean, std = net(orig)
            # Reparameterization-style draw: z ~ N(mean, std^2).
            z = mean + torch.randn(std.shape) * std
            recon = net.decoder(z).reshape(28, 28).numpy()
            orig_img = orig.reshape(28, 28).numpy()

            axes[i].imshow(orig_img)
            axes[i + 5].imshow(recon)
            # Hide tick marks on both the original and reconstruction axes.
            for ax in (axes[i], axes[i + 5]):
                ax.axes.get_xaxis().set_visible(False)
                ax.axes.get_yaxis().set_visible(False)
            axes[i].set_title(f'True Pic {i+1}')
            axes[i + 5].set_title(f'Generate {i+1}')
    plt.show()
