#!/usr/bin/python
# -*- coding:utf-8 -*-
# @FileName : DL6_test6_1.py
# Author    : myh
import torchvision
from torch.utils import data
from torchvision import transforms
from d2l import torch as d2l
from torch import nn


batch_size = 256  # mini-batch size for both loaders

# Convert PIL images to float32 tensors in [0, 1], shape (1, 28, 28).
trans = transforms.ToTensor()

# MNIST handwritten-digit dataset (downloaded to ../data on first run).
mnist_train = torchvision.datasets.MNIST(
    root="../data", train=True, transform=trans, download=True)
mnist_test = torchvision.datasets.MNIST(
    root="../data", train=False, transform=trans, download=True)

# Shuffle only the training split; evaluation order does not affect
# accuracy, and a fixed order makes test-set iteration reproducible.
train_iter2 = data.DataLoader(mnist_train, batch_size, shuffle=True,
                              num_workers=d2l.get_dataloader_workers())
test_iter2 = data.DataLoader(mnist_test, batch_size, shuffle=False,
                             num_workers=d2l.get_dataloader_workers())
# LeNet-style CNN for 28x28 single-channel inputs, using ReLU activations
# and max pooling in place of the classic sigmoid / average-pooling design.
net = nn.Sequential(
    # Conv block 1: 1 -> 6 channels, 5x5 kernel; padding=2 keeps 28x28.
    # The 2x2 max pool then halves it to 6 x 14 x 14.
    nn.Conv2d(1, 6, kernel_size=5, padding=2),
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),
    # Conv block 2: 6 -> 16 channels, no padding: 14 -> 10.
    # Pooling halves again, giving 16 x 5 x 5.
    nn.Conv2d(6, 16, kernel_size=5),
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),
    # Classifier head: flatten to 16*5*5 = 400 features,
    # then three fully connected layers down to 10 class logits.
    nn.Flatten(),
    nn.Linear(16 * 5 * 5, 120),
    nn.ReLU(),
    nn.Linear(120, 84),
    nn.ReLU(),
    nn.Linear(84, 10),
)

# NOTE(review): the original lr=0.4 was observed to diverge ("explode")
# after roughly 6 epochs — ReLU + max pooling tolerates far less
# aggressive SGD steps than the classic sigmoid LeNet. 0.1 trains
# stably over the 5 epochs used here.
lr, num_epochs = 0.1, 5
d2l.train_ch6(net, train_iter2, test_iter2, num_epochs, lr, d2l.try_gpu())
