# 数据集下载地址  https://yann.lecun.com/exdb/mnist/

import torch

import torchvision.datasets as dataset
from torchvision import transforms


#图片是 (1*28*28,label) 形式
# Each sample is a (1x28x28 image tensor, label) pair.
# download=True fetches the dataset on first run instead of crashing
# when the files are missing; once cached under ./mnist it is a no-op.
train_data = dataset.MNIST(
    root="mnist", train=True, transform=transforms.ToTensor(), download=True)
# Mini-batches of 64 images, reshuffled every epoch.
train_loader = torch.utils.data.DataLoader(
    train_data, batch_size=64, shuffle=True)


class CNN(torch.nn.Module):
    """Single-conv-layer CNN for 28x28 grayscale MNIST digits.

    Pipeline: Conv(1->32, 5x5, padding 2) -> BatchNorm -> ReLU ->
    MaxPool(2), then one fully connected layer mapping the flattened
    32x14x14 feature map to 10 class scores.
    """

    def __init__(self):
        super(CNN, self).__init__()
        # Feature extractor: the 5x5 kernel with padding 2 keeps the
        # spatial size at 28x28; the 2x2 max-pool then halves it.
        self.conv1 = torch.nn.Sequential(
            torch.nn.Conv2d(1, 32, kernel_size=5, stride=1, padding=2),
            torch.nn.BatchNorm2d(32),  # normalize activations to mean 0 / var 1
            torch.nn.ReLU(),           # zero out negatives
            torch.nn.MaxPool2d(2),     # (32,28,28) -> (32,14,14)
        )
        # Classifier: flattened features -> 10 digit scores.
        self.fc = torch.nn.Linear(32 * 14 * 14, 10)

    def forward(self, x):
        """Map a batch of images (N,1,28,28) to class scores (N,10)."""
        features = self.conv1(x)
        # Flatten everything except the batch dimension for the FC layer.
        flat = features.view(features.size(0), -1)
        return self.fc(flat)


# Use the GPU when available; fall back to CPU instead of crashing on
# machines without CUDA.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model = CNN().to(device)
num_epochs = 15
# Cross-entropy loss over the 10 digit classes.
loss_func = torch.nn.CrossEntropyLoss()
# Adam: adaptive per-parameter learning rates.
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
# Multiply the learning rate by 0.9 every 5 *epochs*.
lr_scheduler = torch.optim.lr_scheduler.StepLR(
    optimizer, step_size=5, gamma=0.9)

model.train()
for epoch in range(num_epochs):
    print("training...{}".format(epoch))
    for (images, labels) in train_loader:
        images = images.to(device)
        labels = labels.to(device)

        # outputs[i] holds the 10 class scores for image i; the argmax
        # is the predicted digit.
        outputs = model(images)
        loss = loss_func(outputs, labels)

        optimizer.zero_grad()  # clear gradients left from the previous step
        loss.backward()        # backprop: compute gradients
        optimizer.step()       # update parameters

    # BUG FIX: StepLR's step_size counts scheduler steps, and the intent
    # here ("every 5 epochs") requires stepping once per epoch. The
    # original stepped per batch, decaying the LR by 0.9 every 5 batches
    # (~188x per epoch at batch_size=64) instead of every 5 epochs.
    lr_scheduler.step()

    # Checkpoint the weights after every epoch.
    torch.save(model.state_dict(), "mnist.model")