import torch
# 神经网络库
import torch.nn as nn
from torch.autograd import Variable
# 数据加载器，结合了数据集和取样器，并且可以提供多个线程处理数据集
import torch.utils.data as Data
# 包括了图片的数据库
import torchvision
# 出图程序
import matplotlib.pyplot as plt

EPOCH = 1          # number of full passes over the training set
BATCH_SIZE = 50    # mini-batch size served by the DataLoader
LR = 0.001         # learning rate for the Adam optimizer
# Set to True on the first run to download MNIST; False once it is cached on disk.
DOWNLOAD_MNIST = False

# Training split (60k images); ToTensor converts to float tensors in [0, 1].
train_data = torchvision.datasets.MNIST(
        root = './mnist',                             # where the dataset lives on disk
        train = True,                                 # True -> training set, False -> test set
        transform = torchvision.transforms.ToTensor(),
        download = DOWNLOAD_MNIST
)
# print(train_data)
train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)  # shuffled mini-batch iterator
data = torchvision.datasets.MNIST(root='./mnist/', train=False)  # test split (no transform: raw uint8 tensors)
# `Variable(..., volatile=True)` is deprecated since PyTorch 0.4 — plain tensors
# (evaluated under torch.no_grad()) replace it.  Add a channel dimension
# (N, 28, 28) -> (N, 1, 28, 28) and rescale uint8 pixels to [0, 1].
test_x = torch.unsqueeze(data.data, dim=1).type(torch.FloatTensor)[:2000]/255
test_y = data.targets[:2000]  # only 2000 samples to keep evaluation fast

# print(data)
class CNN(nn.Module):
    """LeNet-style MNIST classifier: conv -> pool -> conv -> pool -> linear."""

    def __init__(self):
        super(CNN, self).__init__()
        # First stage: (1, 28, 28) -> conv -> (16, 28, 28) -> max-pool -> (16, 14, 14).
        # With stride 1, padding = (kernel_size - 1) / 2 keeps the spatial size.
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=16, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),                      # activation
            nn.MaxPool2d(kernel_size=2),    # keep the strongest response in each 2x2 region
        )
        # Second stage: (16, 14, 14) -> (32, 14, 14) -> (32, 7, 7).
        self.conv2 = nn.Sequential(
            nn.Conv2d(16, 32, 5, 1, 2),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        # Fully connected head: flattened 32*7*7 features -> 10 digit logits.
        self.out = nn.Linear(32 * 7 * 7, 10)

    def forward(self, x):
        """Return raw class logits of shape (batch, 10) for input (batch, 1, 28, 28)."""
        features = self.conv2(self.conv1(x))
        flat = features.view(features.size(0), -1)  # collapse (C, H, W) into one feature axis
        return self.out(flat)

# Instantiate the network, the optimizer, and the loss criterion.
cnn = CNN()
# print(cnn)
optimizer = torch.optim.Adam(cnn.parameters(), lr=LR) # lr: learning rate
loss_func = nn.CrossEntropyLoss() # classification loss: expects raw logits and integer class labels

# Training loop.  `Variable` wrappers are deprecated since PyTorch 0.4:
# tensors from the DataLoader carry autograd state directly.
for epoch in range(EPOCH):
        for step, (x, y) in enumerate(train_loader):
                output = cnn(x)              # forward pass on the mini-batch
                loss = loss_func(output, y)  # cross-entropy against the true labels
                optimizer.zero_grad()        # clear gradients from the previous step
                loss.backward()              # backpropagate
                optimizer.step()             # apply the Adam update
                # 60000 training images / BATCH_SIZE=50 -> steps 0..1199 per epoch

                if step % 50 == 0:  # every 50 steps report loss and test accuracy
                        with torch.no_grad():  # evaluation needs no autograd bookkeeping
                                test_output = cnn(test_x)
                        pred_y = torch.max(test_output, 1)[1].squeeze()  # index of the max logit per row
                        accuracy = (pred_y == test_y).sum().item() / test_y.size(0)
                        # loss.item() extracts the Python float (loss.data is the old idiom)
                        print('Epoch:', epoch, '| train loss:%.4f' % loss.item(), '| test accuracy:', accuracy)

# Sanity check: predict the first 10 test images and compare to the true labels.
with torch.no_grad():  # inference only — skip gradient tracking
        test_output = cnn(test_x[:10])
pred_y = torch.max(test_output, 1)[1].numpy().squeeze()  # predicted class per image
print(pred_y, 'prediction number')
print(test_y[:10].numpy(), 'real number')