"""
训练网络识别 fashion_mnist

1. 数据集的导入
2. 网络的选择
3. 网络的训练
4. 网络的测试
"""

import torch
import torchvision
from torch.utils import data
from torchvision import transforms
from d2l import torch as d2l
import toolpy

"""
对于网络训练，首先分为这几个部分
1. 网络，用于输入特征，然后输出预测值
2. loss，预测值与真实值的差距，评估模型的loss单独部分
3. 优化器，根据这个loss进行反向传播，更新这个参数，再进行训练
4. 评估函数，评估本次训练的结果，训练第几轮，准确率
"""

batch_size = 20  # mini-batch size used by the data loaders
# trans = transforms.ToTensor()  # image conversion: maps 0-255 pixel values to tensors
num_inputs = 28 * 28  # number of network input nodes (one flattened 28x28 image)
num_outputs = 10  # number of network output nodes (one per Fashion-MNIST class)


def get_fashion_mnist_labels(labels):
    """Map numeric Fashion-MNIST class indices to their text label strings.

    Args:
        labels: iterable of integers (or 0-d tensors) in [0, 9].
    Returns:
        list of the corresponding label strings.
    """
    class_names = ("t-shirt", "trouser", "pullover", "dress", "coat", "sandal", "shirt", "sneaker", "bag", "ankle boot")
    result = []
    for index in labels:
        result.append(class_names[int(index)])
    return result


def get_num_workers():
    """Number of DataLoader worker processes (0 = load data in the main process)."""
    num_workers = 0
    return num_workers

def softmax(x):
    """Row-wise softmax.

    Subtracts each row's maximum before exponentiating so that large
    logits do not overflow `torch.exp` to inf (which would yield nan
    after division); the shift cancels out, so the result is
    mathematically identical to exp(x) / sum(exp(x)).

    Args:
        x: 2-D tensor, one sample per row.
    Returns:
        Tensor of the same shape; each row is non-negative and sums to 1.
    """
    shifted = x - x.max(dim=1, keepdim=True).values
    x_exp = torch.exp(shifted)
    return x_exp / x_exp.sum(dim=1, keepdim=True)


def load_data_fashion_mnist(batch_size, resize=None):
    """Download Fashion-MNIST and return a pair of DataLoaders.

    Each iterator yields (batch_images, batch_labels) tuples.

    Args:
        batch_size: mini-batch size for both loaders.
        resize: optional target size; when truthy, images are resized
            before being converted to tensors.
    Returns:
        (train_loader, test_loader) — training data shuffled, test data not.
    """
    transform_steps = []
    if resize:
        transform_steps.append(transforms.Resize(resize))
    transform_steps.append(transforms.ToTensor())
    composed = transforms.Compose(transform_steps)

    mnist_train = torchvision.datasets.FashionMNIST(
        root="../data", train=True, transform=composed, download=True
    )
    mnist_test = torchvision.datasets.FashionMNIST(
        root="../data", train=False, transform=composed, download=True
    )

    train_loader = data.DataLoader(mnist_train, batch_size, shuffle=True,
                                   num_workers=get_num_workers())
    test_loader = data.DataLoader(mnist_test, batch_size, shuffle=False,
                                  num_workers=get_num_workers())
    return (train_loader, test_loader)

def accuracy(y_hat, y):
    """Count correct predictions.

    Args:
        y_hat: 2-D tensor of per-class scores/probabilities, one row per sample.
        y: 1-D tensor of true class indices, same length as y_hat.
    Returns:
        float — number of samples whose argmax prediction equals the label.
    Raises:
        TypeError: if y_hat and y have different sample counts.
        KeyError: if there are zero samples.
    """
    if y_hat.shape[0] != y.shape[0]:
        raise TypeError("出错：y_hat和y的样本数目不一样")
    elif y_hat.shape[0] == 0:
        # BUG FIX: original compared `y_hat.shape == 0` (a torch.Size vs int),
        # which is always False, so the empty-input guard never fired.
        raise KeyError("出错：y_hat和y的数目为0")

    y_hat = y_hat.argmax(axis=1)  # predicted class = column with highest score
    cmp = (y_hat == y)
    return float(cmp.type(y.dtype).sum())


"""
损失函数
"""
def cross_entropy(y_hat, y):
    """Per-sample cross-entropy loss.

    Picks each sample's predicted probability for its true class and
    returns the negative log of those probabilities (no reduction).

    Args:
        y_hat: 2-D tensor of predicted probabilities, one row per sample.
        y: 1-D tensor of true class indices.
    Returns:
        1-D tensor of losses, one entry per sample.
    """
    sample_indices = range(len(y_hat))
    true_class_probs = y_hat[sample_indices, y]
    return -torch.log(true_class_probs)

def eveluate_acc(net, data_iter):
    """Evaluate `net` over every batch in `data_iter` and return its accuracy.

    Gradients are disabled for the whole pass; nn.Module networks are
    switched to eval mode first.
    """
    if isinstance(net, torch.nn.Module):
        net.eval()  # only torch modules have an eval mode to switch

    metric = toolpy.Accmulator(2)  # [correct predictions, total samples]
    with torch.no_grad():
        for features, labels in data_iter:
            predictions = net.forward(features)
            metric.add(accuracy(predictions, labels), labels.numel())
    return metric[0] / metric[1]

def train_epoch_ch3(net, train_iter, loss, updater):
    """Run one training epoch over `train_iter`.

    Supports both a torch.optim.Optimizer and a custom updater callable
    (which receives the batch size).

    Returns:
        (accuracy, mean loss) for the epoch.
    """
    metric = toolpy.Accmulator(3)  # [total loss, correct predictions, sample count]

    if isinstance(net, torch.nn.Module):
        net.train()  # switch torch modules into training mode

    for features, labels in train_iter:
        predictions = net.forward(features)
        batch_losses = loss(predictions, labels)
        if isinstance(updater, torch.optim.Optimizer):
            # built-in optimizer: backprop the mean loss, then step
            updater.zero_grad()
            batch_losses.mean().backward()
            updater.step()
        else:
            # custom updater: backprop the summed loss, updater scales by batch size
            batch_losses.sum().backward()
            updater(features.shape[0])
        metric.add(batch_losses.sum(), accuracy(predictions, labels), labels.numel())

    # (accuracy, mean training loss)
    return metric[1] / metric[2], metric[0] / metric[2]

def train_ch3(net, train_iter, test_iter, num_epoch, loss, updater):
    """Top-level training loop: train for `num_epoch` epochs, evaluating
    on the test set and printing a progress line after each one."""
    for epoch_index in range(num_epoch):
        train_acc, train_loss = train_epoch_ch3(net, train_iter, loss, updater)
        test_acc = eveluate_acc(net, test_iter)
        print("Epoch %d -------- Train-Acc:%.4f Train-Los:%.4f Test-Acc:%.4f"
              % (epoch_index + 1, train_acc, train_loss, test_acc))

def predict_ch3(net, test_iter, n=6):
    """Plot the first `n` test images titled with true and predicted labels."""
    X, y = next(iter(test_iter))  # only the first batch is needed
    true_names = get_fashion_mnist_labels(y)
    pred_names = get_fashion_mnist_labels(net.forward(X).argmax(axis=1))
    titles = []
    for true_name, pred_name in zip(true_names, pred_names):
        titles.append(true_name + '\n' + pred_name)
    toolpy.plot_images(X[0:n].reshape((n, 28, 28)), 1, n, titles=titles[0:n])

"""
对于fanshion_mnist实现的网络类(自己定义，不用pytorch结构)
"""


class Net1():
    """First-generation Fashion-MNIST network: a single fully connected
    layer mapping 784 flattened pixels straight to 10 class scores (no
    hidden layer), followed by softmax."""

    def __init__(self):
        # Weights drawn from N(0, 0.01), bias zeroed; both track gradients.
        self.w = torch.normal(0, 0.01, size=(784, 10), requires_grad=True)
        self.b = torch.zeros(10, requires_grad=True)

    def forward(self, x):
        """Forward pass; each row of `x` (after flattening to 784 columns)
        is one sample. Returns softmax probabilities, shape (batch, 10)."""
        flattened = x.reshape(-1, self.w.shape[0])
        logits = torch.matmul(flattened, self.w) + self.b
        return softmax(logits)

    def updater(self, batch_size, lr=0.1):
        """Plain SGD step scaled by batch size, then clear the gradients."""
        with torch.no_grad():
            for param in (self.w, self.b):
                param -= param.grad * lr / batch_size
                param.grad.zero_()
"""
test_demo
"""

# 绘制所有的mnist图像
# def test1():
#     batch_size = 20
#     train_iter = data.DataLoader(mnist_train, batch_size=batch_size, shuffle= True)
#
#     for i, (X,y) in enumerate(train_iter):
#         toolpy.plot_images(X.reshape(batch_size,28,28), 4,5,get_fashion_mnist_labels(y),
#                            "D:\Study\d2l-zh\data\FashionMNIST\pic\\"+str(i)+".png")
#         print("已保存"+str(i)+".png")
#
