import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms


# 1. Data preprocessing: convert PIL images to tensors, then normalize
#    pixel values from [0, 1] to [-1, 1] using mean=0.5, std=0.5.
_preprocess_steps = [
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,)),
]
transform = transforms.Compose(_preprocess_steps)

# Download (if missing) and load the MNIST train/test splits, applying the
# preprocessing pipeline defined above to every image.
_mnist_kwargs = dict(
    root='./data',          # where the data is stored on disk
    transform=transform,    # preprocessing applied to each sample
    download=True,          # fetch the archive if it is not present
)
train_dataset = torchvision.datasets.MNIST(train=True, **_mnist_kwargs)
test_dataset = torchvision.datasets.MNIST(train=False, **_mnist_kwargs)

# Wrap the datasets in DataLoaders for mini-batch iteration.
train_loader = torch.utils.data.DataLoader(
    dataset=train_dataset,
    batch_size=64,                              # 64 images per training step
    shuffle=True                                # reshuffle every epoch
)
# BUG FIX: this loader previously wrapped train_dataset, so the reported
# "test accuracy" was actually computed on the training data.
test_loader = torch.utils.data.DataLoader(
    dataset=test_dataset,
    batch_size=1000,                            # larger batches are fine for inference
    shuffle=False                               # order is irrelevant during evaluation
)

# Print basic information about the loaded datasets.
print("训练集大小: {}".format(len(train_dataset)))
print("测试集大小: {}".format(len(test_dataset)))
_sample_image = train_dataset[0][0]                       # first image tensor
print("图片形状: {}".format(_sample_image.shape))          # torch.Size([1, 28, 28])
print("类别数量: 10 (数字0-9)")

# 2. Hyperparameters
input_size = 28     # image side length: FC input would be batch*784 (2-D); CNN input is batch*1*28*28 (batch*channels*h*w, 4-D)
num_classes = 10    # number of label classes (digits 0-9)
num_epochs = 5     # number of full passes over the training set
batch_size = 64     # samples per mini-batch; used for the progress display (train_loader uses its own batch_size=64)

# 3. Define the neural network model
class Mnist(nn.Module):
    """Small CNN for MNIST digit classification.

    Input:  (batch, 1, 28, 28) grayscale images.
    Output: (batch, 10) raw class scores (logits).
    """

    def __init__(self):
        super().__init__()
        # Stage 1: (1, 28, 28) -> conv 5x5 (pad 2 keeps spatial size) ->
        # (16, 28, 28) -> 2x2 max-pool -> (16, 14, 14)
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=16,
                      kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # Stage 2: (16, 14, 14) -> two 5x5 convs -> (32, 14, 14) ->
        # 2x2 max-pool -> (32, 7, 7)
        self.conv2 = nn.Sequential(
            nn.Conv2d(16, 32, 5, 1, 2),
            nn.ReLU(),
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        # Stage 3: (32, 7, 7) -> 5x5 conv -> (64, 7, 7)
        self.conv3 = nn.Sequential(
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.ReLU(),
        )
        # Classifier head: flattened 64*7*7 feature map -> 10 class scores.
        self.out = nn.Linear(64 * 7 * 7, 10)

    def forward(self, x):
        """Run the three conv stages, flatten, and score the 10 classes."""
        features = self.conv3(self.conv2(self.conv1(x)))
        flat = features.view(features.size(0), -1)   # (batch, 64*7*7)
        return self.out(flat)

# 4. Evaluation helper
def accuracy(prediction, labels):
    """Return (number of correct predictions, batch size) for one batch.

    `prediction` holds per-class scores of shape (batch, num_classes);
    `labels` holds the true class indices.
    """
    predicted_classes = prediction.argmax(dim=1)   # index of the highest score per row
    num_correct = predicted_classes.eq(labels.data.view_as(predicted_classes)).sum()
    return num_correct, len(labels)

# 5. Build the model, loss, optimizer and LR schedule, then train.
net = Mnist()
criterion = nn.CrossEntropyLoss()           # combines log-softmax + NLL; expects raw logits
optimizer = optim.Adam(net.parameters(), lr=0.001, weight_decay=1e-5)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)

for epoch in range(num_epochs):
    train_rights = []                       # (correct, total) for each training batch

    for batch_idx, (data, target) in enumerate(train_loader):
        net.train()
        output = net(data)
        loss = criterion(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_rights.append(accuracy(output, target))

        if batch_idx % 100 == 0:
            # Periodically evaluate on the held-out test set.
            net.eval()
            val_rights = []
            # FIX: evaluate under no_grad (no graphs built) and use separate
            # loop variables so the training batch (data, target) is not shadowed.
            with torch.no_grad():
                for val_data, val_target in test_loader:
                    val_rights.append(accuracy(net(val_data), val_target))

            # Aggregate per-batch (correct, total) counts into overall accuracies.
            train_r = (sum(tup[0] for tup in train_rights), sum(tup[1] for tup in train_rights))
            val_r = (sum(tup[0] for tup in val_rights), sum(tup[1] for tup in val_rights))

            print('当前epoch: {} [{}/{}({:.0f}%)]\t损失:{:.6f}\t训练集准确率:{:.2f}%\t测试集准确率:{:.2f}%'.format(
                epoch, batch_idx * batch_size, len(train_loader.dataset),
                100. * batch_idx / len(train_loader),
                loss.item(),                             # FIX: .item() instead of deprecated .data
                100. * train_r[0].item() / train_r[1],   # FIX: .item() instead of .numpy()
                100. * val_r[0].item() / val_r[1]))

    # FIX: the scheduler was created but never stepped, so the learning rate
    # never decayed; step it once per epoch as StepLR expects.
    scheduler.step()


