import torch
import torchvision
from torch import nn
from torch.nn import init
import torchvision.transforms as transforms
from IPython import display
from matplotlib import pyplot as plt
from collections import OrderedDict


num_inputs = 784   # 28*28 pixels per FashionMNIST image, flattened
num_outputs = 10   # number of FashionMNIST classes
batch_size = 256   # mini-batch size used by both DataLoaders
num_workers = 0  # number of DataLoader worker processes (0 = load in the main process)
path_dir = '~/Datasets/FashionMNIST'  # root directory where the dataset is stored/downloaded


#  Download the FashionMNIST train/test datasets (fetched on first use).
def down_load_data(path_dir):
    """Return (train, test) FashionMNIST datasets rooted at `path_dir`.

    Images are converted to tensors via `transforms.ToTensor()`.
    """
    to_tensor = transforms.ToTensor()
    splits = []
    for is_train in (True, False):
        splits.append(torchvision.datasets.FashionMNIST(
            root=path_dir, train=is_train, download=True, transform=to_tensor))
    return splits[0], splits[1]


#  Wrap the datasets in mini-batch iterators.
def load_data_iter(mnist_train, mnist_test, batch_size, num_workers):
    """Return (train_iter, test_iter) DataLoaders.

    Training batches are reshuffled each epoch; test batches keep
    dataset order so evaluation is deterministic.
    """
    make_loader = torch.utils.data.DataLoader
    train_iter = make_loader(mnist_train, batch_size=batch_size,
                             shuffle=True, num_workers=num_workers)
    test_iter = make_loader(mnist_test, batch_size=batch_size,
                            shuffle=False, num_workers=num_workers)
    return train_iter, test_iter


class FlattenLayer(nn.Module):
    """Flatten every non-batch dimension: (batch, *) -> (batch, prod(*)).

    Functionally equivalent to `nn.Flatten()`; kept as a custom layer to
    match the original tutorial code.
    """

    def __init__(self):
        super(FlattenLayer, self).__init__()

    def forward(self, x):  # x shape: (batch, *, *, ...)
        # `reshape` (unlike `view`) also accepts non-contiguous inputs,
        # e.g. tensors produced by transpose/permute; for contiguous
        # tensors it is the same zero-copy operation as `view`.
        return x.reshape(x.shape[0], -1)


# Build the classifier: flatten each image, then apply one linear layer
# (softmax regression; the softmax itself is folded into the loss below).
net = nn.Sequential()
net.add_module('flatten', FlattenLayer())
net.add_module('linear', nn.Linear(num_inputs, num_outputs))

# Evaluate the accuracy of model `net` over the dataset `data_iter`.
def evaluate_accuracy(data_iter, net):
    """Return the fraction of samples that `net` classifies correctly.

    A prediction is the argmax over the output logits of each sample.
    Gradients are not needed for evaluation, so forward passes run under
    `torch.no_grad()` to avoid building autograd graphs (saves memory/time).
    """
    acc_sum, n = 0.0, 0
    with torch.no_grad():
        for X, y in data_iter:
            acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
            n += y.shape[0]
    return acc_sum / n


def sgd(params, lr, batch_size):
    """Mini-batch stochastic gradient descent update, in place.

    Autograd here yields the *sum* of the per-sample gradients over a
    batch, so the step divides by `batch_size` to match the original
    book. (With PyTorch losses that already average over the batch this
    division would be unnecessary.)
    """
    for p in params:
        # Mutate `.data` so autograd does not record the update step.
        p.data.sub_(lr * p.grad / batch_size)


def train(net, train_iter, test_iter, loss, num_epochs, batch_size,
              params=None, lr=None, optimizer=None):
    """Train `net` for `num_epochs` epochs, printing per-epoch metrics.

    Two update modes:
      * `optimizer` given -> zero/step the PyTorch optimizer.
      * `optimizer` None  -> manual SGD on `params` with learning rate `lr`.

    NOTE(review): the printed `train_l_sum / n` divides by the sample
    count. That is the true per-sample average only when `loss` returns a
    per-sample vector (so `.sum()` is the batch total); with a
    reduction='mean' loss such as nn.CrossEntropyLoss it is the sum of
    batch means divided by n — usable as a trend, not an exact average.
    """
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for x, y in train_iter:
            y_hat = net(x)
            l = loss(y_hat, y).sum()

            # Zero out any gradients left from the previous step.
            if optimizer is not None:
                optimizer.zero_grad()
            elif params is not None and params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()

            l.backward()
            if optimizer is None:
                sgd(params, lr, batch_size)
            else:
                optimizer.step()  # apply the optimizer's update rule

            train_l_sum += l.item()

            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
            n += y.shape[0]
        test_acc = evaluate_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
              % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))


def get_fashion_mnist_labels(labels):
    """Map numeric FashionMNIST class indices to their text labels."""
    text_labels = ('t-shirt', 'trouser', 'pullover', 'dress', 'coat',
                   'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot')
    return [text_labels[int(idx)] for idx in labels]


def use_svg_display():
    """Use svg format to display plot in jupyter.

    NOTE(review): `display.set_matplotlib_formats` is deprecated in newer
    IPython releases (7.23+), where it moved to
    `matplotlib_inline.backend_inline.set_matplotlib_formats` — confirm
    against the installed IPython version.
    """
    display.set_matplotlib_formats('svg')


def show_fashion_mnist(images, labels):
    """Draw several images in one row, each titled with its label."""
    use_svg_display()
    # The leading `_` holds the Figure object, which is not needed here.
    _, axes = plt.subplots(1, len(images), figsize=(12, 12))
    for ax, image, label in zip(axes, images, labels):
        ax.imshow(image.view((28, 28)).numpy())
        ax.set_title(label)
        # Hide both axes so only the image and title are shown.
        for axis in (ax.axes.get_xaxis(), ax.axes.get_yaxis()):
            axis.set_visible(False)
    plt.show()


# ---- Script entry: build data, model, loss, optimizer, then train. ----
print("torch.__version__:", torch.__version__)
print("torchvision.__version__:", torchvision.__version__)

mnist_train, mnist_test = down_load_data(path_dir)
train_iter, test_iter = load_data_iter(mnist_train, mnist_test, batch_size, num_workers)

# Initialize the weights from N(0, 0.01^2) and the biases to zero.
init.normal_(net.linear.weight, mean=0, std=0.01)
init.constant_(net.linear.bias, val=0)
# CrossEntropyLoss combines the softmax and the cross-entropy computation.
loss = nn.CrossEntropyLoss()
# Mini-batch SGD with learning rate 0.1 as the optimization algorithm.
optimizer = torch.optim.SGD(net.parameters(), lr=0.1)

# Start training. NOTE(review): `lr` is unused here — the optimizer was
# already built with lr=0.1 above and `train` receives lr=None.
num_epochs, lr = 5, 0.1
train(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, optimizer)

# # Prediction (NOTE(review): the original used Python-2 `iter(...).next()`;
# # the Python 3 spelling is `next(iter(test_iter))`.)
# x, y = next(iter(test_iter))
# true_labels = get_fashion_mnist_labels(y.numpy())
# pred_labels = get_fashion_mnist_labels(net(x).argmax(dim=1).numpy())
# titles = [true + '\n' + pred for true, pred in zip(true_labels, pred_labels)]
#
# # Draw the first predictions (note: x[0:9] is 9 images, not 10).
# show_fashion_mnist(x[0:9], titles[0:9])
