{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "7bbbedc0-c086-4c71-b0ab-bb09e4732d5a",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from torch import nn\n",
    "\n",
    "#from thop import profile\n",
    "#from thop import clever_format\n",
    "\n",
    "class SKConv(nn.Module):\n",
    "    # Selective Kernel convolution (SKNet): M parallel conv branches with\n",
    "    # different receptive fields, fused by learned channel-wise attention.\n",
    "    def __init__(self, features, M=3, G=32, r=16, stride=1 ,L=32):\n",
    "        \"\"\" Constructor\n",
    "        Args:\n",
    "            features: input channel dimensionality.\n",
    "            M: the number of branchs.\n",
    "            G: num of convolution groups.\n",
    "            r: the ratio for compute d, the length of z.\n",
    "            stride: stride, default 1.\n",
    "            L: the minimum dim of the vector z in paper, default 32.\n",
    "        \"\"\"\n",
    "        super(SKConv, self).__init__()\n",
    "        # Bottleneck width of the fuse operator, floored at L.\n",
    "        d = max(int(features/r), L)\n",
    "        self.M = M\n",
    "        self.features = features\n",
    "        self.convs = nn.ModuleList([])\n",
    "        # Branch i uses dilation 1+i with matching padding, so every branch\n",
    "        # keeps the same spatial size while covering a different receptive field.\n",
    "        for i in range(M):\n",
    "            self.convs.append(nn.Sequential(\n",
    "                nn.Conv2d(features, features, kernel_size=3, stride=stride, padding=1+i, dilation=1+i, groups=G, bias=False),\n",
    "                nn.BatchNorm2d(features),\n",
    "                nn.ReLU(inplace=True)\n",
    "            ))\n",
    "        self.gap = nn.AdaptiveAvgPool2d((1,1))\n",
    "        # Squeeze: features -> d via 1x1 conv + BN + ReLU.\n",
    "        self.fc = nn.Sequential(nn.Conv2d(features, d, kernel_size=1, stride=1, bias=False),\n",
    "                                nn.BatchNorm2d(d),\n",
    "                                nn.ReLU(inplace=True))\n",
    "        self.fcs = nn.ModuleList([])\n",
    "        # One excitation head per branch: d -> features.\n",
    "        for i in range(M):\n",
    "            self.fcs.append(\n",
    "                 nn.Conv2d(d, features, kernel_size=1, stride=1)\n",
    "            )\n",
    "        # Softmax over the branch dimension (dim=1 of the (B, M, C, 1, 1) tensor).\n",
    "        self.softmax = nn.Softmax(dim=1)\n",
    "        \n",
    "    def forward(self, x):\n",
    "        # x: (B, features, H, W) -> same shape out.\n",
    "        \n",
    "        batch_size = x.shape[0]\n",
    "        \n",
    "        # Split: run each branch, then stack as (B, M, C, H, W).\n",
    "        feats = [conv(x) for conv in self.convs]      \n",
    "        feats = torch.cat(feats, dim=1)\n",
    "        feats = feats.view(batch_size, self.M, self.features, feats.shape[2], feats.shape[3])\n",
    "        \n",
    "        # Fuse: element-wise sum over branches, global average pool, squeeze.\n",
    "        feats_U = torch.sum(feats, dim=1)\n",
    "        feats_S = self.gap(feats_U)\n",
    "        feats_Z = self.fc(feats_S)\n",
    "\n",
    "        # Select: per-branch attention logits, softmax across the M branches.\n",
    "        attention_vectors = [fc(feats_Z) for fc in self.fcs]\n",
    "        attention_vectors = torch.cat(attention_vectors, dim=1)\n",
    "        attention_vectors = attention_vectors.view(batch_size, self.M, self.features, 1, 1)\n",
    "        attention_vectors = self.softmax(attention_vectors)\n",
    "        \n",
    "        # Attention-weighted sum of the branch outputs.\n",
    "        feats_V = torch.sum(feats*attention_vectors, dim=1)\n",
    "        \n",
    "        return feats_V"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "462b7581-c275-4f31-b555-e18ef3804e27",
   "metadata": {},
   "source": [
    "## Combine SKNet with Inception-v4"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "b7bc97a0-2d2d-4551-9a16-f85b41a006a1",
   "metadata": {},
   "outputs": [],
   "source": [
    "# -*- coding: UTF-8 -*-\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "\n",
    "class BasicConv2d(nn.Module):\n",
    "    # Conv2d + BatchNorm + ReLU: the basic building block of every Inception\n",
    "    # module below. Extra Conv2d kwargs (kernel_size, stride, padding) pass through.\n",
    "\n",
    "    def __init__(self, input_channels, output_channels, **kwargs):\n",
    "        super().__init__()\n",
    "        # bias=False because BatchNorm supplies the affine shift.\n",
    "        self.conv = nn.Conv2d(input_channels, output_channels, bias=False, **kwargs)\n",
    "        self.bn = nn.BatchNorm2d(output_channels)\n",
    "        self.relu = nn.ReLU(inplace=True)\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = self.conv(x)\n",
    "        x = self.bn(x)\n",
    "        x = self.relu(x)\n",
    "\n",
    "        return x\n",
    "\n",
    "class Inception_Stem(nn.Module):\n",
    "\n",
    "    # \"Figure 3. The schema for stem of the pure Inception-v4 and\n",
    "    # Inception-ResNet-v2 networks. This is the input part of those networks.\"\n",
    "    # NOTE(review): unlike the paper, strides here are 1 and convs are padded --\n",
    "    # presumably adapted for small (CIFAR 32x32) inputs; confirm intent.\n",
    "    # Channel flow: 3 -> 64 -> (96+64)=160 -> (96+96)=192 -> (192+192)=384.\n",
    "    def __init__(self, input_channels):\n",
    "        super().__init__()\n",
    "        self.conv1 = nn.Sequential(\n",
    "            BasicConv2d(input_channels, 32, kernel_size=3),\n",
    "            BasicConv2d(32, 32, kernel_size=3, padding=1),\n",
    "            BasicConv2d(32, 64, kernel_size=3, padding=1)\n",
    "        )\n",
    "\n",
    "        # First split: conv branch (96 ch) alongside a max-pool branch (64 ch).\n",
    "        self.branch3x3_conv = BasicConv2d(64, 96, kernel_size=3, padding=1)\n",
    "        self.branch3x3_pool = nn.MaxPool2d(3, stride=1, padding=1)\n",
    "\n",
    "        # Second split: factorized 7x7 path vs. a short 3x3 path, 96 ch each.\n",
    "        self.branch7x7a = nn.Sequential(\n",
    "            BasicConv2d(160, 64, kernel_size=1),\n",
    "            BasicConv2d(64, 64, kernel_size=(7, 1), padding=(3, 0)),\n",
    "            BasicConv2d(64, 64, kernel_size=(1, 7), padding=(0, 3)),\n",
    "            BasicConv2d(64, 96, kernel_size=3, padding=1)\n",
    "        )\n",
    "\n",
    "        self.branch7x7b = nn.Sequential(\n",
    "            BasicConv2d(160, 64, kernel_size=1),\n",
    "            BasicConv2d(64, 96, kernel_size=3, padding=1)\n",
    "        )\n",
    "\n",
    "        # Third split: max-pool vs. 3x3 conv, 192 ch each.\n",
    "        self.branchpoola = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)\n",
    "        self.branchpoolb = BasicConv2d(192, 192, kernel_size=3, stride=1, padding=1)\n",
    "\n",
    "    def forward(self, x):\n",
    "\n",
    "        x = self.conv1(x)\n",
    "\n",
    "        x = [\n",
    "            self.branch3x3_conv(x),\n",
    "            self.branch3x3_pool(x)\n",
    "        ]\n",
    "        x = torch.cat(x, 1)\n",
    "\n",
    "        x = [\n",
    "            self.branch7x7a(x),\n",
    "            self.branch7x7b(x)\n",
    "        ]\n",
    "        x = torch.cat(x, 1)\n",
    "\n",
    "        x = [\n",
    "            self.branchpoola(x),\n",
    "            self.branchpoolb(x)\n",
    "        ]\n",
    "\n",
    "        x = torch.cat(x, 1)\n",
    "\n",
    "        # Output: 384 channels, same spatial size as the 3x3-valid conv1 output.\n",
    "        return x\n",
    "\n",
    "class InceptionA(nn.Module):\n",
    "\n",
    "    # \"Figure 4. The schema for 35 x 35 grid modules of the pure\n",
    "    # Inception-v4 network. This is the Inception-A block of Figure 9.\"\n",
    "    # Four parallel branches, 96 channels each -> 384 output channels.\n",
    "    # Spatial size is preserved (all convs padded, pool stride 1).\n",
    "    def __init__(self, input_channels):\n",
    "        super().__init__()\n",
    "\n",
    "        self.branch3x3stack = nn.Sequential(\n",
    "            BasicConv2d(input_channels, 64, kernel_size=1),\n",
    "            BasicConv2d(64, 96, kernel_size=3, padding=1),\n",
    "            BasicConv2d(96, 96, kernel_size=3, padding=1)\n",
    "        )\n",
    "\n",
    "        self.branch3x3 = nn.Sequential(\n",
    "            BasicConv2d(input_channels, 64, kernel_size=1),\n",
    "            BasicConv2d(64, 96, kernel_size=3, padding=1)\n",
    "        )\n",
    "\n",
    "        self.branch1x1 = BasicConv2d(input_channels, 96, kernel_size=1)\n",
    "\n",
    "        self.branchpool = nn.Sequential(\n",
    "            nn.AvgPool2d(kernel_size=3, stride=1, padding=1),\n",
    "            BasicConv2d(input_channels, 96, kernel_size=1)\n",
    "        )\n",
    "\n",
    "    def forward(self, x):\n",
    "\n",
    "        x = [\n",
    "            self.branch3x3stack(x),\n",
    "            self.branch3x3(x),\n",
    "            self.branch1x1(x),\n",
    "            self.branchpool(x)\n",
    "        ]\n",
    "\n",
    "        # Concatenate along channels: 96 * 4 = 384.\n",
    "        return torch.cat(x, 1)\n",
    "\n",
    "class ReductionA(nn.Module):\n",
    "\n",
    "    # \"Figure 7. The schema for 35 x 35 to 17 x 17 reduction module.\n",
    "    # Different variants of this block (with various numbers of filters)\n",
    "    # are used in each of the Inception(-v4, -ResNet-v1, -ResNet-v2)\n",
    "    # variants. The k, l, m, n numbers represent filter bank sizes which\n",
    "    # can be looked up in Table 1 of the paper.\"\n",
    "    # Halves the spatial size (stride-2, unpadded); output channels are\n",
    "    # input_channels (pool) + n (3x3) + m (3x3 stack), exposed as\n",
    "    # self.output_channels for the caller.\n",
    "    def __init__(self, input_channels, k, l, m, n):\n",
    "\n",
    "        super().__init__()\n",
    "        self.branch3x3stack = nn.Sequential(\n",
    "            BasicConv2d(input_channels, k, kernel_size=1),\n",
    "            BasicConv2d(k, l, kernel_size=3, padding=1),\n",
    "            BasicConv2d(l, m, kernel_size=3, stride=2)\n",
    "        )\n",
    "\n",
    "        self.branch3x3 = BasicConv2d(input_channels, n, kernel_size=3, stride=2)\n",
    "        self.branchpool = nn.MaxPool2d(kernel_size=3, stride=2)\n",
    "        # Channel count after the concatenation in forward().\n",
    "        self.output_channels = input_channels + n + m\n",
    "\n",
    "    def forward(self, x):\n",
    "\n",
    "        x = [\n",
    "            self.branch3x3stack(x),\n",
    "            self.branch3x3(x),\n",
    "            self.branchpool(x)\n",
    "        ]\n",
    "\n",
    "        return torch.cat(x, 1)\n",
    "\n",
    "class InceptionB(nn.Module):\n",
    "\n",
    "    # \"Figure 5. The schema for 17 x 17 grid modules of the pure Inception-v4\n",
    "    # network. This is the Inception-B block of Figure 9.\"\n",
    "    # Four branches emitting 384 + 256 + 256 + 128 = 1024 output channels;\n",
    "    # spatial size preserved. 7x7 convs are factorized into 1x7 / 7x1 pairs.\n",
    "    def __init__(self, input_channels):\n",
    "        super().__init__()\n",
    "\n",
    "        self.branch7x7stack = nn.Sequential(\n",
    "            BasicConv2d(input_channels, 192, kernel_size=1),\n",
    "            BasicConv2d(192, 192, kernel_size=(1, 7), padding=(0, 3)),\n",
    "            BasicConv2d(192, 224, kernel_size=(7, 1), padding=(3, 0)),\n",
    "            BasicConv2d(224, 224, kernel_size=(1, 7), padding=(0, 3)),\n",
    "            BasicConv2d(224, 256, kernel_size=(7, 1), padding=(3, 0))\n",
    "        )\n",
    "\n",
    "        self.branch7x7 = nn.Sequential(\n",
    "            BasicConv2d(input_channels, 192, kernel_size=1),\n",
    "            BasicConv2d(192, 224, kernel_size=(1, 7), padding=(0, 3)),\n",
    "            BasicConv2d(224, 256, kernel_size=(7, 1), padding=(3, 0))\n",
    "        )\n",
    "\n",
    "        self.branch1x1 = BasicConv2d(input_channels, 384, kernel_size=1)\n",
    "\n",
    "        self.branchpool = nn.Sequential(\n",
    "            nn.AvgPool2d(3, stride=1, padding=1),\n",
    "            BasicConv2d(input_channels, 128, kernel_size=1)\n",
    "        )\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = [\n",
    "            self.branch1x1(x),\n",
    "            self.branch7x7(x),\n",
    "            self.branch7x7stack(x),\n",
    "            self.branchpool(x)\n",
    "        ]\n",
    "\n",
    "        return torch.cat(x, 1)\n",
    "\n",
    "class ReductionB(nn.Module):\n",
    "\n",
    "    # \"Figure 8. The schema for 17 x 17 to 8 x 8 grid-reduction module.\n",
    "    # This is the reduction module used by the pure Inception-v4 network in\n",
    "    # Figure 9.\"\n",
    "    # Stride-2 branches halve the spatial size; output channels are\n",
    "    # 192 + 320 + input_channels (so 1024 in -> 1536 out).\n",
    "    def __init__(self, input_channels):\n",
    "\n",
    "        super().__init__()\n",
    "        self.branch7x7 = nn.Sequential(\n",
    "            BasicConv2d(input_channels, 256, kernel_size=1),\n",
    "            BasicConv2d(256, 256, kernel_size=(1, 7), padding=(0, 3)),\n",
    "            BasicConv2d(256, 320, kernel_size=(7, 1), padding=(3, 0)),\n",
    "            BasicConv2d(320, 320, kernel_size=3, stride=2, padding=1)\n",
    "        )\n",
    "\n",
    "        self.branch3x3 = nn.Sequential(\n",
    "            BasicConv2d(input_channels, 192, kernel_size=1),\n",
    "            BasicConv2d(192, 192, kernel_size=3, stride=2, padding=1)\n",
    "        )\n",
    "\n",
    "        self.branchpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n",
    "\n",
    "    def forward(self, x):\n",
    "\n",
    "        x = [\n",
    "            self.branch3x3(x),\n",
    "            self.branch7x7(x),\n",
    "            self.branchpool(x)\n",
    "        ]\n",
    "\n",
    "        return torch.cat(x, 1)\n",
    "\n",
    "class InceptionC(nn.Module):\n",
    "\n",
    "    # \"Figure 6. The schema for 8 x 8 grid modules of the pure Inception-v4\n",
    "    # network. This is the Inception-C block of Figure 9.\"\n",
    "    # Two branches fan out into 1x3/3x1 pairs; total output channels are\n",
    "    # (256+256) + (256+256) + 256 + 256 = 1536. Spatial size preserved.\n",
    "    def __init__(self, input_channels):\n",
    "\n",
    "        super().__init__()\n",
    "\n",
    "        self.branch3x3stack = nn.Sequential(\n",
    "            BasicConv2d(input_channels, 384, kernel_size=1),\n",
    "            BasicConv2d(384, 448, kernel_size=(1, 3), padding=(0, 1)),\n",
    "            BasicConv2d(448, 512, kernel_size=(3, 1), padding=(1, 0)),\n",
    "        )\n",
    "        # The stack output splits into two parallel heads that get concatenated.\n",
    "        self.branch3x3stacka = BasicConv2d(512, 256, kernel_size=(1, 3), padding=(0, 1))\n",
    "        self.branch3x3stackb = BasicConv2d(512, 256, kernel_size=(3, 1), padding=(1, 0))\n",
    "\n",
    "        self.branch3x3 = BasicConv2d(input_channels, 384, kernel_size=1)\n",
    "        self.branch3x3a = BasicConv2d(384, 256, kernel_size=(3, 1), padding=(1, 0))\n",
    "        self.branch3x3b = BasicConv2d(384, 256, kernel_size=(1, 3), padding=(0, 1))\n",
    "\n",
    "        self.branch1x1 = BasicConv2d(input_channels, 256, kernel_size=1)\n",
    "\n",
    "        self.branchpool = nn.Sequential(\n",
    "            nn.AvgPool2d(kernel_size=3, stride=1, padding=1),\n",
    "            BasicConv2d(input_channels, 256, kernel_size=1)\n",
    "        )\n",
    "\n",
    "    def forward(self, x):\n",
    "        branch3x3stack_output = self.branch3x3stack(x)\n",
    "        branch3x3stack_output = [\n",
    "            self.branch3x3stacka(branch3x3stack_output),\n",
    "            self.branch3x3stackb(branch3x3stack_output)\n",
    "        ]\n",
    "        branch3x3stack_output = torch.cat(branch3x3stack_output, 1)\n",
    "\n",
    "        branch3x3_output = self.branch3x3(x)\n",
    "        branch3x3_output = [\n",
    "            self.branch3x3a(branch3x3_output),\n",
    "            self.branch3x3b(branch3x3_output)\n",
    "        ]\n",
    "        branch3x3_output = torch.cat(branch3x3_output, 1)\n",
    "\n",
    "        branch1x1_output = self.branch1x1(x)\n",
    "\n",
    "        branchpool = self.branchpool(x)\n",
    "\n",
    "        output = [\n",
    "            branch1x1_output,\n",
    "            branch3x3_output,\n",
    "            branch3x3stack_output,\n",
    "            branchpool\n",
    "        ]\n",
    "\n",
    "        return torch.cat(output, 1)\n",
    "\n",
    "class SKnet_InceptionV4(nn.Module):\n",
    "    # Inception-v4 backbone with SKConv attention inserted between Inception\n",
    "    # stages, plus two shortcut (\"y_link\") projections that bypass the later\n",
    "    # stages. A, B, C give the number of Inception-A/B/C blocks per stage;\n",
    "    # k/l/m/n parameterize ReductionA; class_nums is the classifier width.\n",
    "\n",
    "    def __init__(self, A, B, C, k=192, l=224, m=256, n=384, class_nums=100):\n",
    "\n",
    "        super().__init__()\n",
    "        self.stem = Inception_Stem(3)\n",
    "        self.inception_a1 = self._generate_inception_module(384, 384, A, InceptionA)\n",
    "        self.NewSK1=SKConv(384)\n",
    "        self.inception_a2 = self._generate_inception_module(384, 384, A, InceptionA)\n",
    "        self.reduction_a = ReductionA(384, k, l, m, n)\n",
    "        output_channels = self.reduction_a.output_channels\n",
    "        self.inception_b1 = self._generate_inception_module(output_channels, 1024, B, InceptionB)\n",
    "        self.NewSK2=SKConv(1024)\n",
    "        # BUGFIX: inception_b1 always emits 1024 channels, so inception_b2 must\n",
    "        # consume 1024 -- the old code used reduction_a's output_channels, which\n",
    "        # only equals 1024 for the default k/l/m/n.\n",
    "        self.inception_b2 = self._generate_inception_module(1024, 1024, B, InceptionB)\n",
    "        self.reduction_b = ReductionB(1024)\n",
    "        self.inception_c1 = self._generate_inception_module(1536, 1536, C, InceptionC)\n",
    "        self.NewSK3=SKConv(1536)\n",
    "        # BUGFIX: this second stack was also assigned to inception_c1, silently\n",
    "        # shadowing the first; name it inception_c2 as intended. (The C stages\n",
    "        # are skipped in forward() below, so the network output is unchanged.)\n",
    "        self.inception_c2 = self._generate_inception_module(1536, 1536, C, InceptionC)\n",
    "        self.avgpool = nn.AvgPool2d(7)\n",
    "\n",
    "        # Dropout with keep probability 0.8.\n",
    "        self.dropout = nn.Dropout2d(1 - 0.8)\n",
    "        self.linear = nn.Linear(1536, class_nums)\n",
    "        \n",
    "        # Strided projections that bring the shortcut tensors to the channel\n",
    "        # count and spatial size of the stage output they are added to.\n",
    "        self.stem_avgpool=BasicConv2d(384,1024,kernel_size=4,stride=2,padding=0)\n",
    "        self.reduction_a_avgpool=BasicConv2d(1024,1536,kernel_size=2,stride=2,padding=0)\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = self.stem(x)\n",
    "        # Shortcut 1: project stem features to match reduction_a's output.\n",
    "        y_link=x\n",
    "        y_link=self.stem_avgpool(y_link)\n",
    "        # Stage 1: Inception-A blocks with an SK attention block in between.\n",
    "        x = self.inception_a1(x)\n",
    "        x = self.NewSK1(x)\n",
    "        x = self.inception_a2(x)\n",
    "        x = self.reduction_a(x)\n",
    "        # Shortcut 2: add, then project to match reduction_b's output.\n",
    "        y_link=x+y_link\n",
    "        y_link=self.reduction_a_avgpool(y_link)\n",
    "        # Stage 2: Inception-B blocks with SK attention.\n",
    "        x = self.inception_b1(x)\n",
    "        x = self.NewSK2(x)\n",
    "        x = self.inception_b2(x)\n",
    "        x = self.reduction_b(x)\n",
    "        y_link=x+y_link\n",
    "        y_link=self.avgpool(y_link)\n",
    "        # Stage 3 (Inception-C) is currently disabled; the classifier runs on\n",
    "        # the pooled shortcut instead.\n",
    "        # x = self.inception_c1(x)\n",
    "        # x = self.NewSK3(x)\n",
    "        # x = self.inception_c2(x)\n",
    "        \n",
    "        x = y_link\n",
    "        x = self.dropout(x)\n",
    "        x = x.view(-1, 1536)\n",
    "        x = self.linear(x)\n",
    "\n",
    "        return x\n",
    "\n",
    "    @staticmethod\n",
    "    def _generate_inception_module(input_channels, output_channels, block_num, block):\n",
    "        \"\"\"Stack block_num copies of `block`: the first consumes input_channels,\n",
    "        subsequent ones consume output_channels.\"\"\"\n",
    "        layers = nn.Sequential()\n",
    "        for idx in range(block_num):\n",
    "            layers.add_module(\"{}_{}\".format(block.__name__, idx), block(input_channels))\n",
    "            input_channels = output_channels\n",
    "\n",
    "        return layers\n",
    "\n",
    "\n",
    "def inceptionv4():\n",
    "    # Factory: one Inception-A/B/C block each, default 100 classes (CIFAR-100).\n",
    "    return SKnet_InceptionV4(1, 1, 1)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0e6b28a7-1388-4c6d-ad6c-ff1e6eea503a",
   "metadata": {},
   "source": [
    "## Test the network"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "6b29b14f-8499-42f4-a4a8-19a34c869e1a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out shape : torch.Size([8, 100])\n"
     ]
    }
   ],
   "source": [
    "## Smoke test: a CIFAR-sized batch should yield logits of shape (batch, classes).\n",
    "# (Removed: unused numpy / torchsummary imports and dead commented-out code.)\n",
    "x = torch.rand(8, 3, 32, 32)\n",
    "net = inceptionv4()\n",
    "out = net(x)\n",
    "print('out shape : {}'.format(out.shape))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "e64f36f1-6605-434e-b686-893449e632ff",
   "metadata": {},
   "outputs": [],
   "source": [
    "net = inceptionv4()\n",
    "# Use the GPU if one is available, otherwise fall back to the CPU.\n",
    "device = torch.device('cuda'if torch.cuda.is_available() else 'cpu')\n",
    "net = net.to(device)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "57b4de0e-39ff-41c1-982b-c79353a166bd",
   "metadata": {},
   "source": [
    "## Load Data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "74a00979-b815-4191-bfbf-467b3d55aebc",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "import torchvision\n",
    "import torchvision.transforms as transforms\n",
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "# Load CIFAR-100 with augmentation on the training split only.\n",
    "# NOTE(review): the mean/std values appear to be the standard CIFAR-100\n",
    "# per-channel statistics -- confirm against the dataset used.\n",
    "transform_train = transforms.Compose([\n",
    "    transforms.RandomCrop(32, padding=4),\n",
    "    transforms.RandomHorizontalFlip(),\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize(mean=[0.5071, 0.4865, 0.4409], std=[0.2673, 0.2564, 0.2762])\n",
    "])\n",
    "transform_test = transforms.Compose([\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize(mean=[0.5071, 0.4865, 0.4409], std=[0.2673, 0.2564, 0.2762])\n",
    "])\n",
    "\n",
    "train_dataset = torchvision.datasets.cifar.CIFAR100(root='./data/cifar100', train=True, transform=transform_train, download=True)\n",
    "test_dataset = torchvision.datasets.cifar.CIFAR100(root='./data/cifar100', train=False, transform=transform_test, download=True)\n",
    "\n",
    "train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=128, shuffle=True)\n",
    "test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=256, shuffle=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "54276559-b268-4d6f-94c2-9b875b3997eb",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hyper-parameters.\n",
    "epochs = 30\n",
    "# NOTE(review): BATCH_SIZE is unused -- the DataLoaders above hardcode 128/256.\n",
    "BATCH_SIZE = 64\n",
    "LR = 0.01\n",
    "\n",
    "# Loss function and optimizer.\n",
    "criterion = torch.nn.CrossEntropyLoss()\n",
    "optimizer = optim.SGD(net.parameters(), lr=LR, momentum=0.9, weight_decay=5e-4)\n",
    "# Scheduler: halve the learning rate every 5 epochs.\n",
    "# NOTE(review): scheduler.step() is not called inside train(); the driver loop\n",
    "# must step it once per epoch, otherwise the LR never decays.\n",
    "scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.5, last_epoch=-1)\n",
    "\n",
    "# Train for one epoch, logging every `log_interval` batches.\n",
    "def train(epoch, log_interval=128):\n",
    "    # Set model to training mode\n",
    "    net.train()\n",
    "    for batch_idx, (data, target) in enumerate(train_loader):\n",
    "        # Copy data to GPU if needed\n",
    "        data = data.to(device)\n",
    "        target = target.to(device)\n",
    "        \n",
    "        # Zero gradient buffers\n",
    "        optimizer.zero_grad()\n",
    "        # Pass data through the network\n",
    "        output = net(data)\n",
    "        # Calculate loss\n",
    "        loss = criterion(output, target)\n",
    "        # Backpropagate\n",
    "        loss.backward()\n",
    "        # Update weights\n",
    "        optimizer.step()\n",
    "        \n",
    "        if batch_idx % log_interval == 0:\n",
    "            print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(epoch, batch_idx * len(data), len(train_loader.dataset), 100. * batch_idx / len(train_loader), loss.data.item()))\n",
    "\n",
    "# Evaluate on the test set; appends average loss and accuracy (in %) to the\n",
    "# given vectors and prints a summary.\n",
    "def validate(loss_vector, accuracy_vector):\n",
    "    with torch.no_grad():\n",
    "        net.eval()\n",
    "        val_loss, correct = 0, 0\n",
    "        for data, target in test_loader:\n",
    "            data = data.to(device)\n",
    "            target = target.to(device)\n",
    "            output = net(data)\n",
    "            val_loss += criterion(output, target).data.item()\n",
    "            # Predicted class = argmax over logits.\n",
    "            pred = output.data.max(1)[1]\n",
    "            correct += pred.eq(target.data).cpu().sum()\n",
    "\n",
    "        val_loss /= len(test_loader)\n",
    "        loss_vector.append(val_loss)\n",
    "\n",
    "        accuracy = 100. * correct.to(torch.float32) / len(test_loader.dataset)\n",
    "        accuracy_vector.append(accuracy)\n",
    "\n",
    "        print('\\nValidation set: Average loss: {:.4f}, Accuracy: {}/{} ({:.04f}%)\\n'.format(val_loss, correct, len(test_loader.dataset), accuracy))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8bc465c5-0ef9-48b3-8f2f-973246781cfa",
   "metadata": {},
   "source": [
    "## main"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "013508d8-1a75-485c-8a2c-0eab20ec6c2b",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train Epoch: 1 [0/50000 (0%)]\tLoss: 4.725432\n",
      "Train Epoch: 1 [16384/50000 (33%)]\tLoss: 4.193923\n",
      "Train Epoch: 1 [32768/50000 (65%)]\tLoss: 3.782380\n",
      "Train Epoch: 1 [49152/50000 (98%)]\tLoss: 3.678084\n",
      "\n",
      "Validation set: Average loss: 3.6814, Accuracy: 1459/10000 (14.5900%)\n",
      "\n",
      "Train Epoch: 2 [0/50000 (0%)]\tLoss: 3.537844\n",
      "Train Epoch: 2 [16384/50000 (33%)]\tLoss: 3.255136\n",
      "Train Epoch: 2 [32768/50000 (65%)]\tLoss: 3.165486\n",
      "Train Epoch: 2 [49152/50000 (98%)]\tLoss: 2.873834\n",
      "\n",
      "Validation set: Average loss: 3.2084, Accuracy: 2223/10000 (22.2300%)\n",
      "\n",
      "Train Epoch: 3 [0/50000 (0%)]\tLoss: 3.001401\n",
      "Train Epoch: 3 [16384/50000 (33%)]\tLoss: 2.890298\n",
      "Train Epoch: 3 [32768/50000 (65%)]\tLoss: 2.769930\n",
      "Train Epoch: 3 [49152/50000 (98%)]\tLoss: 2.603620\n",
      "\n",
      "Validation set: Average loss: 2.7303, Accuracy: 3012/10000 (30.1200%)\n",
      "\n",
      "Train Epoch: 4 [0/50000 (0%)]\tLoss: 2.511073\n",
      "Train Epoch: 4 [16384/50000 (33%)]\tLoss: 2.544171\n",
      "Train Epoch: 4 [32768/50000 (65%)]\tLoss: 2.380512\n",
      "Train Epoch: 4 [49152/50000 (98%)]\tLoss: 2.136357\n",
      "\n",
      "Validation set: Average loss: 2.5064, Accuracy: 3811/10000 (38.1100%)\n",
      "\n",
      "Train Epoch: 5 [0/50000 (0%)]\tLoss: 2.131584\n",
      "Train Epoch: 5 [16384/50000 (33%)]\tLoss: 2.052100\n",
      "Train Epoch: 5 [32768/50000 (65%)]\tLoss: 2.056054\n",
      "Train Epoch: 5 [49152/50000 (98%)]\tLoss: 1.908505\n",
      "\n",
      "Validation set: Average loss: 2.2694, Accuracy: 4016/10000 (40.1600%)\n",
      "\n",
      "Train Epoch: 6 [0/50000 (0%)]\tLoss: 1.875468\n",
      "Train Epoch: 6 [16384/50000 (33%)]\tLoss: 1.754418\n",
      "Train Epoch: 6 [32768/50000 (65%)]\tLoss: 1.895003\n",
      "Train Epoch: 6 [49152/50000 (98%)]\tLoss: 1.810244\n",
      "\n",
      "Validation set: Average loss: 1.9976, Accuracy: 4633/10000 (46.3300%)\n",
      "\n",
      "Train Epoch: 7 [0/50000 (0%)]\tLoss: 1.619081\n",
      "Train Epoch: 7 [16384/50000 (33%)]\tLoss: 1.466638\n",
      "Train Epoch: 7 [32768/50000 (65%)]\tLoss: 1.782785\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "# Train the network, validating and logging after every epoch.\n",
    "if __name__ == \"__main__\":\n",
    "    # Reduce CUDA allocator fragmentation for this large model.\n",
    "    os.environ[\"PYTORCH_CUDA_ALLOC_CONF\"] = \"max_split_size_mb:128\"\n",
    "    lossv, accv = [], []\n",
    "    for epoch in range(1, epochs + 1):\n",
    "        train(epoch)\n",
    "        validate(lossv, accv)\n",
    "        # BUGFIX: advance the StepLR schedule once per epoch; previously the\n",
    "        # scheduler was created but never stepped, so the LR never decayed.\n",
    "        scheduler.step()\n",
    "\n",
    "    print('Finished Training')\n",
    "\n",
    "    # Print final metrics and plot validation loss / accuracy.\n",
    "    # NOTE(review): the label says training metrics but the values come from\n",
    "    # validate() -- these are validation metrics.\n",
    "    print(\"训练损失:{}\\n训练精确度:{}%\".format(lossv[-1], accv[-1].item()))\n",
    "    plt.figure(figsize=(5,3))\n",
    "    plt.plot(np.arange(1,epochs+1), lossv)\n",
    "    plt.title('validation loss')\n",
    "\n",
    "    plt.figure(figsize=(5,3))\n",
    "    plt.plot(np.arange(1,epochs+1), accv)\n",
    "    plt.title('validation accuracy')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7d07430b-16da-4029-a8c8-acde153d56e1",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "# BUGFIX: make sure the output directory exists, otherwise savefig raises\n",
    "# FileNotFoundError on a fresh checkout.\n",
    "os.makedirs('figs', exist_ok=True)\n",
    "print(\"训练损失:{}\\n训练精确度:{}%\".format(lossv[-1], accv[-1].item()))\n",
    "plt.figure(figsize=(5,3))\n",
    "plt.plot(np.arange(1,epochs+1), lossv)\n",
    "plt.title('validation loss')\n",
    "plt.savefig('figs/SKnet_inception_loss.png')\n",
    "\n",
    "plt.figure(figsize=(5,3))\n",
    "plt.plot(np.arange(1,epochs+1), accv)\n",
    "plt.title('validation accuracy')\n",
    "plt.savefig('figs/SKnet_inception_accuracy.png')\n",
    "# Save the trained weights, tagged with the epoch count.\n",
    "path=\"SKnet_inception\"+str(epochs)+\".pth\"\n",
    "torch.save(net.state_dict(),path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "07f8c733-503f-46e5-9bd0-d7a5b196f143",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "dc21dc34-c378-4e7f-9715-e3c98a62dda1",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
