{
 "cells": [
  {
   "attachments": {},
   "cell_type": "markdown",
   "id": "88dc24ff",
   "metadata": {},
   "source": [
    "## 用卷积神经网络训练Cifar100\n",
    "For this tutorial, we will use the CIFAR100 dataset. "
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "id": "1c2411fc",
   "metadata": {},
   "source": [
    "## 1. Load and normalize CIFAR100\n",
    "Using torchvision, it's extremely easy to load CIFAR100."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "f7963711",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "# %matplotlib inline\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "import torchvision\n",
    "import torchvision.transforms as transforms\n",
    "import torch.optim.lr_scheduler as lr_scheduler\n",
    "# import matplotlib.pyplot as plt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "ed493286",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Using PyTorch version: 1.11.0+cu113  Device: cuda\n"
     ]
    }
   ],
   "source": [
    "# Set the device\n",
    "if torch.cuda.is_available():\n",
    "    device = torch.device('cuda')\n",
    "    torch.cuda.set_device(0)\n",
    "else:\n",
    "    device = torch.device('cpu')\n",
    "print('Using PyTorch version:', torch.__version__, ' Device:', device)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "id": "12bc2962",
   "metadata": {},
   "source": [
    "The output of torchvision datasets are PILImage images of range [0, 1]. We transform them to Tensors of normalized range [-1, 1]."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "2dcf4ab3",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "# Define the batch size and number of epochs\n",
    "batch_size = 128\n",
    "num_epochs = 50\n",
    "\n",
    "# Define the transformation for data preprocessing\n",
    "transform_train = transforms.Compose([\n",
    "            transforms.RandomCrop(32, padding=4),\n",
    "            transforms.RandomHorizontalFlip(15),\n",
    "            transforms.ToTensor(),\n",
    "            transforms.Normalize((0.5070751592371323, 0.48654887331495095, 0.4409178433670343),\n",
    "                                 (0.2673342858792401, 0.2564384629170883, 0.27615047132568404)),\n",
    "        ])\n",
    "transform_test = transforms.Compose([\n",
    "            transforms.ToTensor(),\n",
    "            transforms.Normalize((0.5070751592371323, 0.48654887331495095, 0.4409178433670343),\n",
    "                                 (0.2673342858792401, 0.2564384629170883, 0.27615047132568404)),\n",
    "        ])\n",
    "\n",
    "# Load the CIFAR-100 dataset\n",
    "train_dataset = torchvision.datasets.CIFAR100(root='./data', train=True, download=False, transform=transform_train)\n",
    "test_dataset = torchvision.datasets.CIFAR100(root='./data', train=False, download=False, transform=transform_test)\n",
    "\n",
    "# Create data loaders\n",
    "train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=2)\n",
    "test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=2)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "id": "07f934de",
   "metadata": {},
   "source": [
     "## 2. Define SKNet (Selective Kernel Networks)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "e63816e4",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "import torch\n",
    "from torch import nn\n",
    "import torch.nn.functional as F\n",
    "from functools import reduce\n",
    "\n",
    "class SKConv(nn.Module):\n",
    "    def __init__(self, channels, branches=2, groups=32, reduce=16, stride=1, len=32):\n",
    "        super(SKConv, self).__init__()\n",
    "        len = max(channels // reduce, len)\n",
    "        self.branches = branches\n",
    "        self.channels = channels\n",
    "        self.convs = nn.ModuleList([])\n",
    "        for i in range(branches):\n",
    "            self.convs.append(nn.Sequential(\n",
    "                nn.Conv2d(channels, channels, kernel_size=3 + i * 2, stride=stride, padding=1 + i,\n",
    "                          groups=groups, bias=False),\n",
    "                nn.BatchNorm2d(channels),\n",
    "                nn.ReLU(inplace=True)\n",
    "            ))\n",
    "        self.gap = nn.AdaptiveAvgPool2d(1)\n",
    "        self.fc = nn.Linear(channels, len)\n",
    "        self.fcs = nn.ModuleList([])\n",
    "        for i in range(branches):\n",
    "            self.fcs.append(nn.Linear(len, channels))\n",
    "        self.softmax = nn.Softmax(dim=1)\n",
    "\n",
    "    def forward(self, x):\n",
    "        for i, conv in enumerate(self.convs):\n",
    "            val = conv(x).unsqueeze(dim=1)\n",
    "            if i == 0:\n",
    "                vals = val\n",
    "            else:\n",
    "                vals = torch.cat([vals, val], dim=1)\n",
    "        feas = torch.sum(vals, dim=1) \n",
    "        feas = self.gap(feas).flatten(1)\n",
    "        feas = self.fc(feas)\n",
    "        for i, fc in enumerate(self.fcs):\n",
    "            attention = fc(feas).unsqueeze(dim=1)\n",
    "            if i == 0:\n",
    "                attentions = attention\n",
    "            else:\n",
    "                attentions = torch.cat([attentions, attention], dim=1)\n",
    "        attentions = self.softmax(attentions)\n",
    "        attentions = attentions.unsqueeze(-1).unsqueeze(-1)\n",
    "        res = (vals * attentions).sum(dim=1)\n",
    "        return res"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "2494ff5d-e62e-4e1e-a354-24d0b5e9d8f4",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "SKConv(\n",
      "  (convs): ModuleList(\n",
      "    (0): Sequential(\n",
      "      (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)\n",
      "      (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      (2): ReLU(inplace=True)\n",
      "    )\n",
      "    (1): Sequential(\n",
      "      (0): Conv2d(64, 64, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), groups=32, bias=False)\n",
      "      (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      (2): ReLU(inplace=True)\n",
      "    )\n",
      "  )\n",
      "  (gap): AdaptiveAvgPool2d(output_size=1)\n",
      "  (fc): Linear(in_features=64, out_features=32, bias=True)\n",
      "  (fcs): ModuleList(\n",
      "    (0): Linear(in_features=32, out_features=64, bias=True)\n",
      "    (1): Linear(in_features=32, out_features=64, bias=True)\n",
      "  )\n",
      "  (softmax): Softmax(dim=1)\n",
      ")\n"
     ]
    }
   ],
   "source": [
    "test = SKConv(64)\n",
    "print(test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "5a01fccd-62a6-4e99-a699-caacdda42841",
   "metadata": {},
   "outputs": [],
   "source": [
    "class SKUnit(nn.Module):\n",
    "    def __init__(self, in_channels, mid_channels, out_channels, stride=1):\n",
    "        super(SKUnit, self).__init__()\n",
    "\n",
    "        self.conv1 = nn.Sequential(\n",
    "            nn.Conv2d(in_channels, mid_channels, kernel_size=1, stride=1, bias=False),\n",
    "            nn.BatchNorm2d(mid_channels),\n",
    "            nn.ReLU(inplace=True)\n",
    "        )\n",
    "\n",
    "        self.conv2 = nn.Sequential(\n",
    "            SKConv(mid_channels, stride=stride),\n",
    "            nn.BatchNorm2d(mid_channels),      \n",
    "            nn.ReLU(inplace=True)\n",
    "        )\n",
    "\n",
    "        self.conv3 = nn.Sequential(\n",
    "            nn.Conv2d(mid_channels, out_channels, kernel_size=1, stride=1, bias=False),\n",
    "            nn.BatchNorm2d(out_channels)\n",
    "        )\n",
    "\n",
    "        if in_channels == out_channels: \n",
    "            self.shortcut = nn.Sequential()\n",
    "        else: \n",
    "            self.shortcut = nn.Sequential(\n",
    "                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False),\n",
    "                nn.BatchNorm2d(out_channels)\n",
    "            )\n",
    "\n",
    "        self.relu = nn.ReLU(inplace=True)\n",
    "\n",
    "    def forward(self, x):\n",
    "        residual = x\n",
    "        residual = self.shortcut(residual)\n",
    "\n",
    "        x = self.conv1(x)\n",
    "        x = self.conv2(x)\n",
    "        x = self.conv3(x)\n",
    "        x += residual\n",
    "        return self.relu(x)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "c17b5bb0",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 定义SKNet模型\n",
    "class SKNet(nn.Module):\n",
    "    def __init__(self, block=SKUnit, layers=[3, 4, 6, 3], num_classes=100):\n",
    "        super(SKNet, self).__init__()\n",
    "        self.in_channels = 64\n",
    "        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)\n",
    "        self.bn1 = nn.BatchNorm2d(64)\n",
    "        self.relu = nn.ReLU(inplace=True)\n",
    "        self.layer1 = self.make_layer(block, 64, 64*4, layers[0], stride=1)\n",
    "        self.layer2 = self.make_layer(block, 128, 128*4, layers[1], stride=2)\n",
    "        self.layer3 = self.make_layer(block, 256, 256*4, layers[2], stride=2)\n",
    "        self.layer4 = self.make_layer(block, 512, 512*4, layers[3], stride=2)\n",
    "        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n",
    "        self.fc = nn.Linear(512*4, num_classes)\n",
    "\n",
    "    def make_layer(self, block, mid_channels, out_channels, blocks, stride=1):\n",
    "        strides = [stride] + [1] * (blocks - 1)\n",
    "        layers = []\n",
    "        for stride in strides:\n",
    "            layers.append(block(self.in_channels, mid_channels, out_channels, stride))\n",
    "            self.in_channels = out_channels\n",
    "\n",
    "        return nn.Sequential(*layers)\n",
    "\n",
    "    def forward(self, x):\n",
    "        out = self.conv1(x)\n",
    "        out = self.bn1(out)\n",
    "        out = self.relu(out)\n",
    "        out = self.layer1(out)\n",
    "        out = self.layer2(out)\n",
    "        out = self.layer3(out)\n",
    "        out = self.layer4(out)\n",
    "        out = self.avgpool(out)\n",
    "        out = out.view(out.size(0), -1)\n",
    "        out = self.fc(out)\n",
    "        return out"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "c2dbf7f5",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "parameter:14.109732M\n"
     ]
    }
   ],
   "source": [
     "# Initialize the model and move its parameters to the selected device\n",
     "model = SKNet().to(device)\n",
     "# print('Total params: %.2fM' % (sum(p.numel() for p in model.parameters()) / 1000000.0))\n",
     "# Report the total trainable-parameter count in millions\n",
     "total = sum([param.nelement() for param in model.parameters()])\n",
     "print(\"parameter:%fM\" % (total/1e6))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "a34acf3b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# # Load the model\n",
    "# PATH = './cifar_sknet.pth'\n",
    "# model.load_state_dict(torch.load(PATH))"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "id": "12312f6f",
   "metadata": {},
   "source": [
    "## 3. Define a Loss function and optimizer\n",
     "Let’s use a Classification Cross-Entropy loss and SGD with momentum."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "id": "40fbef67",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Define the loss function and optimizer (SGD with momentum and weight decay)\n",
     "criterion = nn.CrossEntropyLoss()\n",
     "optimizer = optim.SGD(model.parameters(), lr=0.05, momentum=0.9, weight_decay=5e-4)\n",
     "\n",
     "# Define the learning rate schedule\n",
     "lr_schedule_milestones = [int(num_epochs * 0.5), int(num_epochs * 0.75)]  # at 50% and 75% of total training epochs\n",
     "lr_schedule_gamma = 0.1  # Learning rate reduction factor\n",
     "\n",
     "# Scheduler: multiply the learning rate by gamma at each milestone epoch\n",
     "scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=lr_schedule_milestones, gamma=lr_schedule_gamma)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "id": "0ceb4d7b",
   "metadata": {},
   "source": [
    "## 4. Train the network\n",
    "This is when things start to get interesting. We simply have to loop over our data iterator, and feed the inputs to the network and optimize."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "id": "654c8151",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Per-epoch history, used to inspect/plot learning curves after training\n",
     "train_losses = []  # mean training loss per epoch\n",
     "test_losses = []  # mean test loss per epoch\n",
     "train_accs = []  # training accuracy (%) per epoch\n",
     "test_accs = []  # test accuracy (%) per epoch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "id": "3f8b32e3",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch [1/50], Step [100/391], Loss: 5.3627\n",
      "Epoch [1/50], Step [200/391], Loss: 4.9061\n",
      "Epoch [1/50], Step [300/391], Loss: 4.6661\n",
      "Epoch [1/50], Test Loss: 3.9244, Test Accuracy: 8.44%\n",
      "Epoch [2/50], Step [100/391], Loss: 3.8767\n",
      "Epoch [2/50], Step [200/391], Loss: 3.8068\n",
      "Epoch [2/50], Step [300/391], Loss: 3.7451\n",
      "Epoch [2/50], Test Loss: 3.3839, Test Accuracy: 17.55%\n",
      "Epoch [3/50], Step [100/391], Loss: 3.3801\n",
      "Epoch [3/50], Step [200/391], Loss: 3.3070\n",
      "Epoch [3/50], Step [300/391], Loss: 3.2434\n",
      "Epoch [3/50], Test Loss: 2.9268, Test Accuracy: 25.95%\n",
      "Epoch [4/50], Step [100/391], Loss: 2.8386\n",
      "Epoch [4/50], Step [200/391], Loss: 2.7945\n",
      "Epoch [4/50], Step [300/391], Loss: 2.7409\n",
      "Epoch [4/50], Test Loss: 2.4806, Test Accuracy: 35.24%\n",
      "Epoch [5/50], Step [100/391], Loss: 2.3652\n",
      "Epoch [5/50], Step [200/391], Loss: 2.3435\n",
      "Epoch [5/50], Step [300/391], Loss: 2.3199\n",
      "Epoch [5/50], Test Loss: 2.3480, Test Accuracy: 38.52%\n",
      "Epoch [6/50], Step [100/391], Loss: 2.0346\n",
      "Epoch [6/50], Step [200/391], Loss: 2.0380\n",
      "Epoch [6/50], Step [300/391], Loss: 2.0105\n",
      "Epoch [6/50], Test Loss: 1.9949, Test Accuracy: 45.99%\n",
      "Epoch [7/50], Step [100/391], Loss: 1.7995\n",
      "Epoch [7/50], Step [200/391], Loss: 1.7947\n",
      "Epoch [7/50], Step [300/391], Loss: 1.7779\n",
      "Epoch [7/50], Test Loss: 1.8639, Test Accuracy: 49.56%\n",
      "Epoch [8/50], Step [100/391], Loss: 1.5674\n",
      "Epoch [8/50], Step [200/391], Loss: 1.5918\n",
      "Epoch [8/50], Step [300/391], Loss: 1.5920\n",
      "Epoch [8/50], Test Loss: 1.8362, Test Accuracy: 50.02%\n",
      "Epoch [9/50], Step [100/391], Loss: 1.4290\n",
      "Epoch [9/50], Step [200/391], Loss: 1.4361\n",
      "Epoch [9/50], Step [300/391], Loss: 1.4409\n",
      "Epoch [9/50], Test Loss: 1.7810, Test Accuracy: 53.36%\n",
      "Epoch [10/50], Step [100/391], Loss: 1.3159\n",
      "Epoch [10/50], Step [200/391], Loss: 1.3290\n",
      "Epoch [10/50], Step [300/391], Loss: 1.3397\n",
      "Epoch [10/50], Test Loss: 2.0544, Test Accuracy: 55.56%\n",
      "Epoch [11/50], Step [100/391], Loss: 1.2041\n",
      "Epoch [11/50], Step [200/391], Loss: 1.2362\n",
      "Epoch [11/50], Step [300/391], Loss: 1.2597\n",
      "Epoch [11/50], Test Loss: 1.6164, Test Accuracy: 55.47%\n",
      "Epoch [12/50], Step [100/391], Loss: 1.1304\n",
      "Epoch [12/50], Step [200/391], Loss: 1.1596\n",
      "Epoch [12/50], Step [300/391], Loss: 1.1668\n",
      "Epoch [12/50], Test Loss: 1.4297, Test Accuracy: 60.39%\n",
      "Epoch [13/50], Step [100/391], Loss: 1.0264\n",
      "Epoch [13/50], Step [200/391], Loss: 1.0543\n",
      "Epoch [13/50], Step [300/391], Loss: 1.0785\n",
      "Epoch [13/50], Test Loss: 1.6696, Test Accuracy: 57.00%\n",
      "Epoch [14/50], Step [100/391], Loss: 1.0148\n",
      "Epoch [14/50], Step [200/391], Loss: 1.0161\n",
      "Epoch [14/50], Step [300/391], Loss: 1.0303\n",
      "Epoch [14/50], Test Loss: 2.1279, Test Accuracy: 58.74%\n",
      "Epoch [15/50], Step [100/391], Loss: 0.9344\n",
      "Epoch [15/50], Step [200/391], Loss: 0.9584\n",
      "Epoch [15/50], Step [300/391], Loss: 0.9649\n",
      "Epoch [15/50], Test Loss: 1.4811, Test Accuracy: 59.90%\n",
      "Epoch [16/50], Step [100/391], Loss: 0.8478\n",
      "Epoch [16/50], Step [200/391], Loss: 0.8867\n",
      "Epoch [16/50], Step [300/391], Loss: 0.9104\n",
      "Epoch [16/50], Test Loss: 1.3948, Test Accuracy: 61.58%\n",
      "Epoch [17/50], Step [100/391], Loss: 0.8154\n",
      "Epoch [17/50], Step [200/391], Loss: 0.8468\n",
      "Epoch [17/50], Step [300/391], Loss: 0.8789\n",
      "Epoch [17/50], Test Loss: 1.5347, Test Accuracy: 59.22%\n",
      "Epoch [18/50], Step [100/391], Loss: 0.7885\n",
      "Epoch [18/50], Step [200/391], Loss: 0.8280\n",
      "Epoch [18/50], Step [300/391], Loss: 0.8466\n",
      "Epoch [18/50], Test Loss: 1.4260, Test Accuracy: 60.74%\n",
      "Epoch [19/50], Step [100/391], Loss: 0.7472\n",
      "Epoch [19/50], Step [200/391], Loss: 0.7891\n",
      "Epoch [19/50], Step [300/391], Loss: 0.8165\n",
      "Epoch [19/50], Test Loss: 1.5705, Test Accuracy: 59.40%\n",
      "Epoch [20/50], Step [100/391], Loss: 0.7147\n",
      "Epoch [20/50], Step [200/391], Loss: 0.7529\n",
      "Epoch [20/50], Step [300/391], Loss: 0.7830\n",
      "Epoch [20/50], Test Loss: 1.6052, Test Accuracy: 58.26%\n",
      "Epoch [21/50], Step [100/391], Loss: 0.6913\n",
      "Epoch [21/50], Step [200/391], Loss: 0.7255\n",
      "Epoch [21/50], Step [300/391], Loss: 0.7548\n",
      "Epoch [21/50], Test Loss: 1.4890, Test Accuracy: 60.39%\n",
      "Epoch [22/50], Step [100/391], Loss: 0.6810\n",
      "Epoch [22/50], Step [200/391], Loss: 0.7132\n",
      "Epoch [22/50], Step [300/391], Loss: 0.7404\n",
      "Epoch [22/50], Test Loss: 1.5392, Test Accuracy: 59.75%\n",
      "Epoch [23/50], Step [100/391], Loss: 0.6581\n",
      "Epoch [23/50], Step [200/391], Loss: 0.6950\n",
      "Epoch [23/50], Step [300/391], Loss: 0.7219\n",
      "Epoch [23/50], Test Loss: 1.4030, Test Accuracy: 62.73%\n",
      "Epoch [24/50], Step [100/391], Loss: 0.6191\n",
      "Epoch [24/50], Step [200/391], Loss: 0.6632\n",
      "Epoch [24/50], Step [300/391], Loss: 0.6919\n",
      "Epoch [24/50], Test Loss: 1.4075, Test Accuracy: 62.42%\n",
      "Epoch [25/50], Step [100/391], Loss: 0.6143\n",
      "Epoch [25/50], Step [200/391], Loss: 0.6547\n",
      "Epoch [25/50], Step [300/391], Loss: 0.6792\n",
      "Epoch [25/50], Test Loss: 1.6116, Test Accuracy: 58.01%\n",
      "Epoch [26/50], Step [100/391], Loss: 0.4541\n",
      "Epoch [26/50], Step [200/391], Loss: 0.3997\n",
      "Epoch [26/50], Step [300/391], Loss: 0.3691\n",
      "Epoch [26/50], Test Loss: 0.9567, Test Accuracy: 73.28%\n",
      "Epoch [27/50], Step [100/391], Loss: 0.2183\n",
      "Epoch [27/50], Step [200/391], Loss: 0.2140\n",
      "Epoch [27/50], Step [300/391], Loss: 0.2154\n",
      "Epoch [27/50], Test Loss: 0.9683, Test Accuracy: 73.64%\n",
      "Epoch [28/50], Step [100/391], Loss: 0.1709\n",
      "Epoch [28/50], Step [200/391], Loss: 0.1678\n",
      "Epoch [28/50], Step [300/391], Loss: 0.1668\n",
      "Epoch [28/50], Test Loss: 0.9858, Test Accuracy: 74.09%\n",
      "Epoch [29/50], Step [100/391], Loss: 0.1310\n",
      "Epoch [29/50], Step [200/391], Loss: 0.1280\n",
      "Epoch [29/50], Step [300/391], Loss: 0.1278\n",
      "Epoch [29/50], Test Loss: 1.0040, Test Accuracy: 73.80%\n",
      "Epoch [30/50], Step [100/391], Loss: 0.1067\n",
      "Epoch [30/50], Step [200/391], Loss: 0.1041\n",
      "Epoch [30/50], Step [300/391], Loss: 0.1036\n",
      "Epoch [30/50], Test Loss: 1.0202, Test Accuracy: 73.74%\n",
      "Epoch [31/50], Step [100/391], Loss: 0.0818\n",
      "Epoch [31/50], Step [200/391], Loss: 0.0841\n",
      "Epoch [31/50], Step [300/391], Loss: 0.0866\n",
      "Epoch [31/50], Test Loss: 1.0449, Test Accuracy: 73.15%\n",
      "Epoch [32/50], Step [100/391], Loss: 0.0744\n",
      "Epoch [32/50], Step [200/391], Loss: 0.0722\n",
      "Epoch [32/50], Step [300/391], Loss: 0.0729\n",
      "Epoch [32/50], Test Loss: 1.0586, Test Accuracy: 73.48%\n",
      "Epoch [33/50], Step [100/391], Loss: 0.0624\n",
      "Epoch [33/50], Step [200/391], Loss: 0.0606\n",
      "Epoch [33/50], Step [300/391], Loss: 0.0614\n",
      "Epoch [33/50], Test Loss: 1.0710, Test Accuracy: 73.39%\n",
      "Epoch [34/50], Step [100/391], Loss: 0.0512\n",
      "Epoch [34/50], Step [200/391], Loss: 0.0509\n",
      "Epoch [34/50], Step [300/391], Loss: 0.0516\n",
      "Epoch [34/50], Test Loss: 1.0983, Test Accuracy: 73.43%\n",
      "Epoch [35/50], Step [100/391], Loss: 0.0427\n",
      "Epoch [35/50], Step [200/391], Loss: 0.0422\n",
      "Epoch [35/50], Step [300/391], Loss: 0.0431\n",
      "Epoch [35/50], Test Loss: 1.1070, Test Accuracy: 73.19%\n",
      "Epoch [36/50], Step [100/391], Loss: 0.0363\n",
      "Epoch [36/50], Step [200/391], Loss: 0.0373\n",
      "Epoch [36/50], Step [300/391], Loss: 0.0373\n",
      "Epoch [36/50], Test Loss: 1.1208, Test Accuracy: 73.56%\n",
      "Epoch [37/50], Step [100/391], Loss: 0.0312\n",
      "Epoch [37/50], Step [200/391], Loss: 0.0318\n",
      "Epoch [37/50], Step [300/391], Loss: 0.0332\n",
      "Epoch [37/50], Test Loss: 1.1174, Test Accuracy: 73.10%\n",
      "Epoch [38/50], Step [100/391], Loss: 0.0276\n",
      "Epoch [38/50], Step [200/391], Loss: 0.0280\n",
      "Epoch [38/50], Step [300/391], Loss: 0.0277\n",
      "Epoch [38/50], Test Loss: 1.1202, Test Accuracy: 73.32%\n",
      "Epoch [39/50], Step [100/391], Loss: 0.0257\n",
      "Epoch [39/50], Step [200/391], Loss: 0.0250\n",
      "Epoch [39/50], Step [300/391], Loss: 0.0254\n",
      "Epoch [39/50], Test Loss: 1.1224, Test Accuracy: 73.47%\n",
      "Epoch [40/50], Step [100/391], Loss: 0.0245\n",
      "Epoch [40/50], Step [200/391], Loss: 0.0256\n",
      "Epoch [40/50], Step [300/391], Loss: 0.0260\n",
      "Epoch [40/50], Test Loss: 1.1193, Test Accuracy: 73.38%\n",
      "Epoch [41/50], Step [100/391], Loss: 0.0245\n",
      "Epoch [41/50], Step [200/391], Loss: 0.0236\n",
      "Epoch [41/50], Step [300/391], Loss: 0.0243\n",
      "Epoch [41/50], Test Loss: 1.1189, Test Accuracy: 73.53%\n",
      "Epoch [42/50], Step [100/391], Loss: 0.0235\n",
      "Epoch [42/50], Step [200/391], Loss: 0.0233\n",
      "Epoch [42/50], Step [300/391], Loss: 0.0238\n",
      "Epoch [43/50], Step [100/391], Loss: 0.0228\n",
      "Epoch [43/50], Step [200/391], Loss: 0.0236\n",
      "Epoch [43/50], Step [300/391], Loss: 0.0236\n",
      "Epoch [43/50], Test Loss: 1.1216, Test Accuracy: 73.51%\n",
      "Epoch [44/50], Step [100/391], Loss: 0.0229\n",
      "Epoch [44/50], Step [200/391], Loss: 0.0226\n",
      "Epoch [44/50], Step [300/391], Loss: 0.0226\n",
      "Epoch [44/50], Test Loss: 1.1228, Test Accuracy: 73.41%\n",
      "Epoch [45/50], Step [100/391], Loss: 0.0218\n",
      "Epoch [45/50], Step [200/391], Loss: 0.0218\n",
      "Epoch [45/50], Step [300/391], Loss: 0.0219\n",
      "Epoch [45/50], Test Loss: 1.1219, Test Accuracy: 73.53%\n",
      "Epoch [46/50], Step [100/391], Loss: 0.0209\n",
      "Epoch [46/50], Step [200/391], Loss: 0.0217\n",
      "Epoch [46/50], Step [300/391], Loss: 0.0218\n",
      "Epoch [46/50], Test Loss: 1.1253, Test Accuracy: 73.46%\n",
      "Epoch [47/50], Step [100/391], Loss: 0.0214\n",
      "Epoch [47/50], Step [200/391], Loss: 0.0209\n",
      "Epoch [47/50], Step [300/391], Loss: 0.0215\n",
      "Epoch [47/50], Test Loss: 1.1232, Test Accuracy: 73.51%\n",
      "Epoch [48/50], Step [100/391], Loss: 0.0202\n",
      "Epoch [48/50], Step [200/391], Loss: 0.0214\n",
      "Epoch [48/50], Step [300/391], Loss: 0.0211\n",
      "Epoch [48/50], Test Loss: 1.1225, Test Accuracy: 73.59%\n",
      "Epoch [49/50], Step [100/391], Loss: 0.0226\n",
      "Epoch [49/50], Step [200/391], Loss: 0.0225\n",
      "Epoch [49/50], Step [300/391], Loss: 0.0221\n",
      "Epoch [49/50], Test Loss: 1.1258, Test Accuracy: 73.72%\n",
      "Epoch [50/50], Step [100/391], Loss: 0.0207\n",
      "Epoch [50/50], Step [200/391], Loss: 0.0202\n",
      "Epoch [50/50], Step [300/391], Loss: 0.0201\n",
      "Epoch [50/50], Test Loss: 1.1186, Test Accuracy: 73.68%\n"
     ]
    }
   ],
   "source": [
     "# Training loop: one pass over the training set per epoch, followed by a\n",
     "# full evaluation pass over the test set.\n",
     "total_steps = len(train_loader)\n",
     "for epoch in range(num_epochs):\n",
     "    # Running statistics for this epoch's training pass\n",
     "    train_loss = 0.0\n",
     "    train_total = 0\n",
     "    train_correct = 0\n",
     "\n",
     "    for i, (images, labels) in enumerate(train_loader):\n",
     "        images = images.to(device)\n",
     "        labels = labels.to(device)\n",
     "\n",
     "        # Forward pass\n",
     "        outputs = model(images)\n",
     "        loss = criterion(outputs, labels)\n",
     "\n",
     "        # Backward and optimize\n",
     "        optimizer.zero_grad()\n",
     "        loss.backward()\n",
     "        optimizer.step()\n",
     "\n",
     "        train_loss += loss.item()\n",
     "        # Predicted class = argmax over logits (.data detaches; legacy idiom)\n",
     "        _, predicted = torch.max(outputs.data, 1)\n",
     "        train_total += labels.size(0)\n",
     "        train_correct += (predicted == labels).sum().item()\n",
     "\n",
     "        # Print training progress (running mean loss over the epoch so far)\n",
     "        if (i + 1) % 100 == 0:\n",
     "            print(\n",
     "                f\"Epoch [{epoch + 1}/{num_epochs}], Step [{i + 1}/{total_steps}], Loss: {train_loss / (i + 1):.4f}\")\n",
     "\n",
     "    train_acc = 100.0 * train_correct / train_total\n",
     "    train_losses.append(train_loss / total_steps)\n",
     "    train_accs.append(train_acc)\n",
     "    \n",
     "    # Step the LR scheduler once per epoch (decays lr at the milestones)\n",
     "    scheduler.step()\n",
     "    # Evaluation on the test set (puts BatchNorm into inference mode)\n",
     "    model.eval()\n",
     "    test_loss = 0.0\n",
     "    test_total = 0\n",
     "    test_correct = 0\n",
     "\n",
     "    # No gradients needed during evaluation\n",
     "    with torch.no_grad():\n",
     "        for images, labels in test_loader:\n",
     "            images = images.to(device)\n",
     "            labels = labels.to(device)\n",
     "\n",
     "            # Forward pass\n",
     "            outputs = model(images)\n",
     "            loss = criterion(outputs, labels)\n",
     "\n",
     "            test_loss += loss.item()\n",
     "            _, predicted = torch.max(outputs.data, 1)\n",
     "            test_total += labels.size(0)\n",
     "            test_correct += (predicted == labels).sum().item()\n",
     "\n",
     "    test_acc = 100.0 * test_correct / test_total\n",
     "    test_losses.append(test_loss / len(test_loader))\n",
     "    test_accs.append(test_acc)\n",
     "\n",
     "    # Print test accuracy for the current epoch\n",
     "    print(f\"Epoch [{epoch+1}/{num_epochs}], Test Loss: {test_loss / len(test_loader):.4f}, Test Accuracy: {test_acc:.2f}%\")\n",
     "\n",
     "    # Switch back to training mode for the next epoch\n",
     "    model.train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "da551fa6",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Save the trained weights (state_dict only, not the full module object)\n",
     "PATH = './cifar_sknet.pth'\n",
     "torch.save(model.state_dict(), PATH)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "id": "9c2df4ca",
   "metadata": {},
   "source": [
    "## 5. Test the network on the test data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "41c04eae",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test Accuracy of the model on the 10000 test images: 73.57%\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "ResNet(\n",
       "  (conv1): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "  (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "  (relu): ReLU(inplace=True)\n",
       "  (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)\n",
       "  (layer1): Sequential(\n",
       "    (0): BasicBlock(\n",
       "      (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "      (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (shortcut): Sequential()\n",
       "    )\n",
       "    (1): BasicBlock(\n",
       "      (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "      (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (shortcut): Sequential()\n",
       "    )\n",
       "    (2): BasicBlock(\n",
       "      (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "      (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (shortcut): Sequential()\n",
       "    )\n",
       "  )\n",
       "  (layer2): Sequential(\n",
       "    (0): BasicBlock(\n",
       "      (conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (shortcut): Sequential(\n",
       "        (0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
       "        (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "    )\n",
       "    (1): BasicBlock(\n",
       "      (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (shortcut): Sequential()\n",
       "    )\n",
       "    (2): BasicBlock(\n",
       "      (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (shortcut): Sequential()\n",
       "    )\n",
       "    (3): BasicBlock(\n",
       "      (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (shortcut): Sequential()\n",
       "    )\n",
       "  )\n",
       "  (layer3): Sequential(\n",
       "    (0): BasicBlock(\n",
       "      (conv1): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (shortcut): Sequential(\n",
       "        (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
       "        (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "    )\n",
       "    (1): BasicBlock(\n",
       "      (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (shortcut): Sequential()\n",
       "    )\n",
       "    (2): BasicBlock(\n",
       "      (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (shortcut): Sequential()\n",
       "    )\n",
       "    (3): BasicBlock(\n",
       "      (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (shortcut): Sequential()\n",
       "    )\n",
       "    (4): BasicBlock(\n",
       "      (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (shortcut): Sequential()\n",
       "    )\n",
       "    (5): BasicBlock(\n",
       "      (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (shortcut): Sequential()\n",
       "    )\n",
       "  )\n",
       "  (layer4): Sequential(\n",
       "    (0): BasicBlock(\n",
       "      (conv1): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "      (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (shortcut): Sequential(\n",
       "        (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
       "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "    )\n",
       "    (1): BasicBlock(\n",
       "      (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "      (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (shortcut): Sequential()\n",
       "    )\n",
       "    (2): BasicBlock(\n",
       "      (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "      (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (shortcut): Sequential()\n",
       "    )\n",
       "  )\n",
       "  (avgpool): AdaptiveAvgPool2d(output_size=(1, 1))\n",
       "  (fc): Linear(in_features=512, out_features=100, bias=True)\n",
       ")"
      ]
     },
     "execution_count": 49,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Evaluation on the test set\n",
    "model.eval()\n",
    "test_loss = 0.0\n",
    "test_total = 0\n",
    "test_correct = 0\n",
    "\n",
    "with torch.no_grad():\n",
    "    for images, labels in test_loader:\n",
    "        images = images.to(device)\n",
    "        labels = labels.to(device)\n",
    "\n",
    "        # Forward pass\n",
    "        outputs = model(images)\n",
    "        loss = criterion(outputs, labels)\n",
    "\n",
    "        test_loss += loss.item()\n",
    "        _, predicted = torch.max(outputs.data, 1)\n",
    "        test_total += labels.size(0)\n",
    "        test_correct += (predicted == labels).sum().item()\n",
    "\n",
    "test_acc = 100.0 * test_correct / test_total\n",
    "test_losses.append(test_loss / len(test_loader))\n",
    "test_accs.append(test_acc)\n",
    "\n",
    "# Print test accuracy for the current epoch\n",
    "print(f\"Test Accuracy of the model on the {len(test_loader.dataset)} test images: {test_acc:.2f}%\")\n",
    "\n",
    "# Switch back to training mode\n",
    "model.train()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e6197b53",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Plot the training and testing losses\n",
    "plt.figure(figsize=(10, 5))\n",
    "plt.plot(range(1, num_epochs + 1), train_losses, label='Training Loss')\n",
    "plt.plot(range(1, num_epochs + 1), test_losses, label='Testing Loss')\n",
    "plt.xlabel('Epoch')\n",
    "plt.ylabel('Loss')\n",
    "plt.title('Training and Testing Loss')\n",
    "plt.legend()\n",
    "plt.show()\n",
    "\n",
    "# Plot the training and testing acc\n",
    "plt.figure(figsize=(10, 5))\n",
    "plt.plot(range(1, num_epochs + 1), train_accs, label='Training Accuracy')\n",
    "plt.plot(range(1, num_epochs + 1), test_accs, label='Testing Accuracy')\n",
    "plt.xlabel('Epoch')\n",
    "plt.ylabel('Accuracy')\n",
    "plt.title('Training and Testing Accuracy')\n",
    "plt.legend()\n",
    "plt.show()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
