import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms


# Step 1: load the FashionMNIST training and test sets.
# ToTensor scales pixels to [0, 1]; Normalize((0.5,), (0.5,)) then maps
# them to roughly [-1, 1] (single channel, hence the 1-tuples).
transform = transforms.Compose(
    [transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))]
)
trainset = datasets.FashionMNIST(root='./data', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
testset = datasets.FashionMNIST(root='./data', train=False, download=True, transform=transform)
# Evaluation order does not affect accuracy — no reason to shuffle the test set.
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=False)

# Step 2: build a small LeNet-style CNN
# (2 conv layers, max-pooling, 2 fully-connected layers, ReLU in between).
# The network outputs raw logits: nn.CrossEntropyLoss applies log-softmax
# internally, so the trailing LogSoftmax layer was redundant (log-softmax
# only shifts each row by a constant, leaving both the loss and the argmax
# prediction unchanged) and has been removed.
model = nn.Sequential(
          nn.Conv2d(1, 32, 3, 1),   # 1x28x28 -> 32x26x26
          nn.ReLU(),
          nn.Conv2d(32, 64, 3, 1),  # 32x26x26 -> 64x24x24
          nn.ReLU(),
          nn.MaxPool2d(2),          # 64x24x24 -> 64x12x12
          nn.Dropout2d(0.25),       # spatial dropout on whole feature maps
          nn.Flatten(),             # -> 64*12*12 = 9216 features
          nn.Linear(9216, 128),
          nn.ReLU(),
          # BUG FIX: use nn.Dropout here — nn.Dropout2d expects spatial
          # (N, C, H, W) input and is wrong for flat (N, features) tensors.
          nn.Dropout(0.5),
          nn.Linear(128, 10))       # 10 FashionMNIST classes (logits)

# Step 3: optimizer and loss function. Adam adapts the learning rate
# per parameter, so it usually trains well without manual tuning.
optimizer = optim.Adam(model.parameters())
criterion = nn.CrossEntropyLoss()

# Step 4: train for 20 epochs; after each epoch print the average
# training loss, then the accuracy (%) on the test set.
for epoch in range(20):
    # BUG FIX: the original never toggled train/eval mode, so the dropout
    # layers stayed active during test evaluation and under-reported accuracy.
    model.train()
    running_loss = 0.0
    for inputs, labels in trainloader:
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    print(epoch + 1, running_loss / len(trainloader))

    # Evaluate with dropout disabled and autograd turned off.
    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for images, labels in testloader:
            outputs = model(images)
            # argmax over the class dimension replaces the legacy
            # torch.max(outputs.data, 1) idiom; .data is unnecessary
            # (and discouraged) inside torch.no_grad().
            predicted = outputs.argmax(dim=1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print(100 * correct / total)


