import torch
from torchvision.datasets import MNIST
from torchvision import transforms

# MNIST train/test splits, images converted to float tensors in [0, 1].
# download=True fetches the data on first run.
to_tensor = transforms.ToTensor()

mnist_train = MNIST(root='MNIST_data/', train=True,
                    transform=to_tensor, download=True)

mnist_test = MNIST(root='MNIST_data/', train=False,
                   transform=to_tensor, download=True)

# Mini-batch iterator over the training split, reshuffled every epoch.
train = torch.utils.data.DataLoader(mnist_train, batch_size=64, shuffle=True)

device=torch.device('cuda:0'if torch.cuda.is_available() else 'cpu')

'''
Network layout:
1. Convolution, 3x3 kernel, stride 1, zero padding
2. Max pooling, 2x2 kernel, stride 2, zero padding
3. Convolution, 3x3 kernel, stride 1, zero padding
4. Max pooling, 2x2 kernel, stride 2, zero padding
5. Fully connected layer for classification
'''
# Two conv+pool stages followed by a linear classifier.
# Fix: the original had NO activation functions, so apart from the max-pools
# the conv stack was an almost purely linear map — a ReLU after each
# convolution restores the nonlinearity a CNN needs to learn.
# Shapes: 1x28x28 -> 32x28x28 -> 32x14x14 -> 64x14x14 -> 64x7x7 -> 10 logits.
model = torch.nn.Sequential(
    torch.nn.Conv2d(1, 32, kernel_size=(3, 3), stride=1, padding=1),   # 28*28*32
    torch.nn.ReLU(),
    torch.nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0),       # 14*14*32
    torch.nn.Conv2d(32, 64, kernel_size=(3, 3), stride=1, padding=1),  # 14*14*64
    torch.nn.ReLU(),
    torch.nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0),       # 7*7*64
    torch.nn.Flatten(),
    torch.nn.Linear(7 * 7 * 64, 10),  # raw logits; CrossEntropyLoss applies softmax
).to(device)

# Softmax cross-entropy over the 10 digit classes; expects raw logits.
criterion = torch.nn.CrossEntropyLoss()

# Adam with the script's original (small) learning rate.
optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)

# Train for 10 epochs. After each epoch, print the loss and accuracy of the
# LAST mini-batch only — a cheap progress signal, not a full-set evaluation.
for epoch in range(10):
    for batch_x, batch_y in train:
        # Bug fix: the original called .cuda(), which crashes on CPU-only
        # machines even though `device` was selected above. .to(device)
        # works for both CPU and GPU.
        X = batch_x.to(device)
        Y = batch_y.to(device)

        optimizer.zero_grad()
        h = model(X)               # logits, shape (batch, 10)
        cost = criterion(h, Y)
        cost.backward()
        optimizer.step()

    # Metrics on the final batch of the epoch; no gradients needed here.
    with torch.no_grad():
        pred = torch.argmax(h, dim=1)
        acc = (pred == Y).float().mean().item()
    print(epoch + 1, cost.item(), acc)