# pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.utils.data import Dataset

from torchvision import datasets
from torchvision.transforms import ToTensor
from torchvision.datasets import MNIST

import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm


# Model definition: a 3-hidden-layer MLP classifier, subclassing nn.Module.
class Model(nn.Module):
    """MLP for flattened 28x28 images: 784 -> 256 -> 256 -> 256 -> 10.

    Each hidden block is Linear -> BatchNorm1d -> ReLU; the head maps the
    final 256-dim features to 10 class logits (digits 0-9).
    """

    def __init__(self):
        super().__init__()
        width = 256
        fan_in = 28 * 28  # flattened input image size
        blocks = []
        # Three identical hidden blocks; built in a loop so the module
        # order (and therefore the state_dict keys) matches a hand-written
        # Sequential of the same nine layers.
        for _ in range(3):
            blocks += [
                nn.Linear(fan_in, width),
                nn.BatchNorm1d(width),
                nn.ReLU(inplace=True),
            ]
            fan_in = width
        self.nn = nn.Sequential(*blocks)
        # Classification head: 10 output logits, one per digit class.
        self.head = nn.Sequential(
            nn.Linear(width, 10)
        )

    def forward(self, x):
        """Map a (B, 784) float batch to (B, 10) class logits."""
        features = self.nn(x)
        return self.head(features)


# Training function.
def train(model, dataset, batch_size, epochs):
    """Train `model` on `dataset` with cross-entropy loss and AdamW.

    :param model: network mapping a (B, 784) float batch to (B, 10) logits
    :param dataset: dataset yielding (image, label) pairs; images are
        reshaped to (B, -1) before the forward pass
    :param batch_size: mini-batch size for the DataLoader
    :param epochs: number of full passes over the dataset
    :return: None; saves a checkpoint "<epoch>.pth" after every epoch
    """
    # DataLoader batches and shuffles the dataset each epoch.
    dl = DataLoader(dataset, batch_size, shuffle=True)
    # AdamW optimizer performs the gradient-descent parameter updates.
    opt = torch.optim.AdamW(model.parameters(), lr=2e-4)
    # Ensure layers with train/eval behavior (BatchNorm here) are in
    # training mode, regardless of the state the caller left the model in.
    model.train()
    for epoch in range(epochs):
        # tqdm wraps the loader to show per-batch progress.
        train_bar = tqdm(dl)
        for images, labels in train_bar:
            # Clear gradients accumulated by the previous step.
            opt.zero_grad()
            # Flatten (B, 1, 28, 28) images to (B, 784) for the MLP.
            outputs = model(images.reshape(images.shape[0], -1))
            loss = F.cross_entropy(outputs, labels)
            # Backpropagate and update the parameters.
            loss.backward()
            opt.step()
            # Show the current batch loss on the progress bar.
            train_bar.set_description(f"{loss.item():.4f}")
        # Checkpoint the weights after each epoch.
        torch.save(model.state_dict(), f"{epoch}.pth")


if __name__ == '__main__':
    # Build the MLP classifier.
    m = Model()
    # MNIST digits; ToTensor converts PIL images to float tensors in [0, 1].
    mnist = MNIST("./data", download=True, transform=ToTensor())
    # Train for 10 epochs with batch size 16 (checkpoints saved per epoch).
    train(m, mnist, 16, 10)

    # FashionMNIST demo: visualize samples and inspect batch shapes.
    training_data = datasets.FashionMNIST(
        root="data",
        train=True,
        download=True,
        transform=ToTensor()
    )

    test_data = datasets.FashionMNIST(
        root="data",
        train=False,
        download=True,
        transform=ToTensor()
    )

    # Human-readable names for the 10 FashionMNIST class indices.
    labels_map = {
        0: "T-Shirt",
        1: "Trouser",
        2: "Pullover",
        3: "Dress",
        4: "Coat",
        5: "Sandal",
        6: "Shirt",
        7: "Sneaker",
        8: "Bag",
        9: "Ankle Boot",
    }
    # Show a 3x3 grid of randomly chosen training samples.
    figure = plt.figure(figsize=(8, 8))
    cols, rows = 3, 3
    for i in range(1, cols * rows + 1):
        sample_idx = torch.randint(len(training_data), size=(1,)).item()
        img, label = training_data[sample_idx]
        figure.add_subplot(rows, cols, i)
        plt.title(labels_map[label])
        plt.axis("off")
        plt.imshow(img.squeeze(), cmap="gray")
    plt.show()

    # DataLoader is already imported at the top of the file.
    train_dataloader = DataLoader(training_data, batch_size=64, shuffle=True)
    test_dataloader = DataLoader(test_data, batch_size=64, shuffle=True)

    # Display one batch's first image and label. (Previously this looped
    # over every batch and blocked on plt.show() ~938 times per epoch.)
    train_features, train_labels = next(iter(train_dataloader))
    print(f"Feature batch shape: {train_features.size()}")
    print(f"Labels batch shape: {train_labels.size()}")
    img = train_features[0].squeeze()
    label = train_labels[0]
    plt.imshow(img, cmap="gray")
    plt.show()
    print(f"Label: {label}")

    # Number of batches per epoch (same value the old batch counter printed).
    print(len(train_dataloader))
