{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "from torch import nn,optim\n",
    "from torch.autograd import Variable\n",
    "from torchvision import datasets, transforms\n",
    "from torch.utils.data import DataLoader\n",
    "import numpy as np\n",
    "import torch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the data\n",
    "# Training set (60,000 images)\n",
    "train_dataset = datasets.MNIST(root='./',\n",
    "                             train=True,\n",
    "                             transform=transforms.ToTensor(),\n",
    "                             download=True)\n",
    "# Test set (10,000 images) -- must use train=False; the original passed\n",
    "# train=True, so the model was being \"tested\" on its own training data\n",
    "test_dataset = datasets.MNIST(root='./',\n",
    "                             train=False,\n",
    "                             transform=transforms.ToTensor(),\n",
    "                             download=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Batch size: number of samples per optimizer step\n",
    "batch_size = 64\n",
    "\n",
    "# Wrap the training set in a DataLoader; shuffle=True randomizes the sample\n",
    "# order each epoch, and samples are grouped into batches of 64\n",
    "train_loader = DataLoader(dataset=train_dataset,\n",
    "                         batch_size=batch_size,\n",
    "                         shuffle=True)\n",
    "\n",
    "# Wrap the test set; shuffling is harmless here since only an aggregate\n",
    "# accuracy is computed from it\n",
    "test_loader = DataLoader(dataset=test_dataset,\n",
    "                         batch_size=batch_size,\n",
    "                         shuffle=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([64, 1, 28, 28])\n",
      "torch.Size([64])\n"
     ]
    }
   ],
   "source": [
    "# Inspect a single batch: enumerate yields (index, batch) pairs from the loader\n",
    "for i, data in enumerate(train_loader):\n",
    "    inputs, labels = data\n",
    "    print(inputs.shape)# inputs is 64*1*28*28: 64 images, 1 channel each, 28*28 pixels\n",
    "    print(labels.shape)# labels holds the digit class of each of the 64 images\n",
    "    break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([2, 7, 7, 6, 7, 3, 2, 2, 9, 4, 3, 7, 4, 3, 9, 2, 0, 9, 7, 0, 1, 0, 0, 8,\n",
       "        9, 3, 8, 8, 3, 6, 5, 7, 5, 8, 4, 3, 8, 8, 5, 2, 9, 3, 5, 4, 5, 0, 6, 0,\n",
       "        3, 0, 0, 1, 8, 6, 5, 2, 3, 0, 3, 9, 0, 6, 8, 2])"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Display the label tensor left over from the loop in the previous cell\n",
    "labels"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 定义网络结构\n",
    "class Net(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(Net, self).__init__()\n",
    "        # nn.Sequential是指执行一个序列，下面的一条语句就相当于三条语句：nn.Linear(784, 500), nn.Dropout(p=0.5), nn.Tanh()\n",
    "        # Conv2d参数介绍：1个输入，输出32个特征图，卷积核大小为5，卷积步长为1,padding边缘两层为0；ReLU为激活函数\n",
    "        self.conv1 = nn.Sequential(nn.Conv2d(1, 32, 5, 1, 2), nn.ReLU())\n",
    "        self.poolling1 = nn.MaxPool2d(2, 2)# 最大池化方式，池化窗口为2*2，步长为2\n",
    "        self.conv2 = nn.Sequential(nn.Conv2d(32, 64, 5, 1, 2), nn.ReLU())\n",
    "        self.poolling2 = nn.MaxPool2d(2, 2)\n",
    "        # 接下来是全连接层\n",
    "        # 第一层卷积将一张28*28的图片卷积为32张特征图，尺寸不变（因为填充了2圈边缘）\n",
    "        # 第二层池化将图片池化为14*14的尺寸，因为池化窗口为2*2\n",
    "        # 第三层卷积将图片卷积为64张14*14的\n",
    "        # 第四层池化将图片池化为7*7的尺寸，因为池化窗口为2*2\n",
    "        # 所以将得到的特征进行全连接就得到了64*7*7个神经元（输入）\n",
    "        self.layer1 = nn.Sequential(nn.Linear(64*7*7, 1000), nn.Dropout(p=0.5), nn.ReLU())\n",
    "        # 最终将结果以第1个维度输出为概率形式\n",
    "        self.layer2 = nn.Sequential(nn.Linear(1000, 10), nn.Softmax(dim=1))\n",
    "    def forward(self, x):\n",
    "        # 卷积层的输入数据必须是四维的如[a, b, c, d]：表示每批有a个图片，每个图片有b个通道（如黑白就只有一个通道），图片尺寸为c*d。\n",
    "        x = self.conv1(x)\n",
    "        x = self.poolling1(x)\n",
    "        x = self.conv2(x)\n",
    "        x = self.poolling2(x)\n",
    "        # 因为全连接要求数据是二维的，所以进行转换\n",
    "        x = x.view(x.size()[0],-1)\n",
    "        x = self.layer1(x)\n",
    "        x = self.layer2(x)\n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Instantiate the model\n",
    "model = Net()\n",
    "# Cross-entropy loss (applies log-softmax to the logits internally)\n",
    "cel_loss = nn.CrossEntropyLoss()\n",
    "# Plain SGD optimizer\n",
    "optimizer = optim.SGD(model.parameters(), 0.5)# learning rate is 0.5"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train():\n",
    "    # Training mode: Dropout(p=0.5) is active, so half of the fully\n",
    "    # connected units are randomly dropped on each forward pass\n",
    "    model.train()\n",
    "    for i, data in enumerate(train_loader):\n",
    "        # One batch of images and their labels\n",
    "        inputs, labels = data\n",
    "        # Forward pass: model predictions\n",
    "        out = model(inputs)\n",
    "        # Cross-entropy loss: out is (64, C), labels is (64,)\n",
    "        loss = cel_loss(out, labels)\n",
    "        # Clear gradients accumulated by the previous step\n",
    "        optimizer.zero_grad()\n",
    "        # Backpropagate\n",
    "        loss.backward()\n",
    "        # Update the weights\n",
    "        optimizer.step()\n",
    "\n",
    "def test():\n",
    "    # Evaluation mode: Dropout is disabled, all units are used\n",
    "    model.eval()\n",
    "    # Accuracy on the test set\n",
    "    correct = 0\n",
    "    with torch.no_grad():  # no gradients needed during evaluation\n",
    "        for i, data in enumerate(test_loader):\n",
    "            # One batch of images and their labels\n",
    "            inputs, labels = data\n",
    "            # Model predictions, shape (64, 10)\n",
    "            out = model(inputs)\n",
    "            # Index of the largest output is the predicted class\n",
    "            _,predicted = torch.max(out, 1)\n",
    "            correct += (predicted == labels).sum()\n",
    "    print(\"Test acc:{0}\".format(correct.item()/len(test_dataset)))\n",
    "    \n",
    "    # Accuracy on the training set\n",
    "    correct = 0\n",
    "    with torch.no_grad():\n",
    "        for i, data in enumerate(train_loader):\n",
    "            inputs, labels = data\n",
    "            out = model(inputs)\n",
    "            _,predicted = torch.max(out, 1)\n",
    "            correct += (predicted == labels).sum()\n",
    "    # Divide by the number of samples, not len(train_loader): the loader's\n",
    "    # length is the number of batches (~938), which inflated the reported\n",
    "    # accuracy by roughly a factor of batch_size in the original code\n",
    "    print(\"Train acc:{0}\".format(correct.item()/len(train_dataset)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 0\n"
     ]
    }
   ],
   "source": [
    "# Train for 10 epochs, evaluating after each one\n",
    "for epoch in range(10):\n",
    "    print('epoch:',epoch)\n",
    "    train()\n",
    "    test()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "pytorchStudy",
   "language": "python",
   "name": "pytorchstudy"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
