{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# from res2net_Fca import *\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "import torchvision\n",
    "import torchvision.transforms as transforms\n",
    "from useful_tools import *\n",
    "import math\n",
    "import sys"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n"
     ]
    }
   ],
   "source": [
    "# Build the train/test data loaders and class list via the project helper\n",
    "# (get_dataLoader comes from useful_tools; presumably CIFAR -- see the\n",
    "# \"Files already downloaded and verified\" output).\n",
    "batch_size = 32\n",
    "trainloader, testloader, classes = get_dataLoader(batch_size)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Number of target classes, inferred from the dataset's label list.\n",
    "num_class = len(classes)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 70,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Select GPU when available, else CPU.\n",
    "# NOTE(review): `device` is re-assigned to \"cuda:0\" in a later cell; the\n",
    "# notebook should keep a single definition.\n",
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 179,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_1d_dct(i, freq, L):\n",
    "    result = math.cos(math.pi * freq * (i + 0.5) / L) / math.sqrt(L)\n",
    "    if freq == 0: \n",
    "        return result \n",
    "    else: \n",
    "        return result * math.sqrt(2) \n",
    "def get_dct_weights( width, height, channel, fidx_u= [0,1,0,5,2,0,2,0,0,6,0,4,6,3,2,5], fidx_v= [0,0,6,0,0,1,1,4,5,1,3,0,0,0,2,3]):\n",
    "    # width : width of input \n",
    "    # height : height of input \n",
    "    # channel : channel of input \n",
    "    # fidx_u : horizontal indices of selected fequency \n",
    "    # according to the paper, should be [0,0,6,0,0,1,1,4,5,1,3,0,0,0,2,3]\n",
    "    # fidx_v : vertical indices of selected fequency \n",
    "    # according to the paper, should be [0,1,0,5,2,0,2,0,0,6,0,4,6,3,2,5]\n",
    "    # [0,0],[0,1],[6,0],[0,5],[0,2],[1,0],[1,2],[4,0],\n",
    "    # [5,0],[1,6],[3,0],[0,4],[0,6],[0,3],[2,2],[3,5],\n",
    "    dct_weights = torch.zeros(1, channel, width, height)\n",
    "    #channel为什么是torch.Size([32, 256, 8, 8])\n",
    "    c_part = channel // len(fidx_u) \n",
    "    # split channel for multi-spectal attention \n",
    "    for i, (u_x, v_y) in enumerate(zip(fidx_u, fidx_v)): \n",
    "        for t_x in range(width): \n",
    "            for t_y in range(height): \n",
    "                dct_weights[:, i * c_part: (i+1)*c_part, t_x, t_y]\\\n",
    "                =get_1d_dct(t_x, u_x, width) * get_1d_dct(t_y, v_y, height) \n",
    "    # Eq. 7 in our paper \n",
    "    return dct_weights \n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 180,
   "metadata": {},
   "outputs": [],
   "source": [
    "# This layer preserves the input's shape (output size == input size).\n",
    "\n",
    "class FcaLayer(nn.Module):\n",
    "    \"\"\"Frequency Channel Attention layer (FcaNet).\n",
    "\n",
    "    Re-weights the channels of a (B, C, H, W) feature map: a fixed 2-D DCT\n",
    "    filter bank produces one scalar per channel (multi-spectral squeeze),\n",
    "    which a small bottleneck MLP turns into per-channel gates in (0, 1).\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, channel, reduction, width, height):\n",
    "        super(FcaLayer, self).__init__()\n",
    "        # Fixed DCT filters: a buffer, so they are saved with the state dict\n",
    "        # but receive no gradient.\n",
    "        # NOTE(review): relies on the notebook-global `device`; confirm it is\n",
    "        # defined before this layer is instantiated.\n",
    "        self.register_buffer('pre_computed_dct_weights',\n",
    "                             get_dct_weights(width, height, channel).to(device))\n",
    "        hidden = channel // reduction\n",
    "        gate_mlp = nn.Sequential(\n",
    "            nn.Linear(channel, hidden, bias=False),\n",
    "            nn.ReLU(inplace=True),\n",
    "            nn.Linear(hidden, channel, bias=False),\n",
    "            nn.Sigmoid())\n",
    "        self.fc = gate_mlp.to(device)\n",
    "\n",
    "    def forward(self, x):\n",
    "        batch, channels = x.size(0), x.size(1)\n",
    "        # Multi-spectral squeeze: DCT-weighted spatial sum per channel.\n",
    "        squeezed = torch.sum(x * self.pre_computed_dct_weights, dim=(2, 3)).to(device)\n",
    "        gates = self.fc(squeezed).view(batch, channels, 1, 1)\n",
    "        return x * gates.expand_as(x)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 291,
   "metadata": {},
   "outputs": [],
   "source": [
    "def show_shape(item):\n",
    "    print(item.shape)\n",
    "class Bottleneck(nn.Module):\n",
    "    \"\"\"Res2Net bottleneck block: 1x1 conv -> hierarchical split 3x3 convs\n",
    "    -> 1x1 conv, followed by a lazily constructed frequency-channel-attention\n",
    "    layer (FcaLayer) and a residual connection.\n",
    "\n",
    "    NOTE(review): the `se` flag is accepted but never stored or checked;\n",
    "    attention is always applied. Confirm whether se=False should disable it.\n",
    "    \"\"\"\n",
    "    # Output channels = planes * expansion (standard bottleneck convention).\n",
    "    expansion = 4\n",
    "\n",
    "    def __init__(self, inplanes, planes, stride=1, downsample=None, scales=4, groups=1, se=True):\n",
    "        super(Bottleneck, self).__init__()\n",
    "        self.downsample = downsample\n",
    "        self.scales = scales\n",
    "        self.groups = groups\n",
    "        self.stride = stride\n",
    "\n",
    "        # Width of the middle (3x3) stage; grouped convs widen it by `groups`.\n",
    "        outplanes = groups * planes\n",
    "\n",
    "        self.conv1 = nn.Conv2d(in_channels=inplanes, out_channels=outplanes, kernel_size=1, stride=1, bias=False)\n",
    "        self.bn1 = nn.BatchNorm2d(outplanes)\n",
    "\n",
    "        # One 3x3 conv per hierarchical split; the last of the `scales` splits\n",
    "        # bypasses the convolutions entirely (see forward()).\n",
    "        self.conv2 = nn.ModuleList([nn.Conv2d(outplanes // scales, outplanes // scales,\n",
    "                                              kernel_size=3, stride=stride, padding=1, groups=groups, bias=False) for _\n",
    "                                    in range(scales - 1)])\n",
    "        # self.conv2 = nn.ModuleList([\n",
    "        #     nn.Conv2d(outplanes // scales, outplanes // scales, kernel_size=3, stride=1, padding=1, groups=groups, bias=False)\n",
    "        #     if i != (scales - 2)\n",
    "        #     else nn.Conv2d(outplanes // scales, outplanes // scales, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False)\n",
    "        #     for i in range(scales - 1)\n",
    "        # ])\n",
    "        \n",
    "        self.bn2 = nn.ModuleList([nn.BatchNorm2d(outplanes // scales) for _ in range(scales - 1)])\n",
    "\n",
    "        self.conv3 = nn.Conv2d(outplanes, planes * self.expansion, kernel_size=1, stride=1, bias=False)\n",
    "        self.bn3 = nn.BatchNorm2d(planes * self.expansion)\n",
    "\n",
    "        self.relu = nn.ReLU(inplace=True)\n",
    "\n",
    "        # self.se = FcaLayer(planes * self.expansion,width,height) if se else None\n",
    "        # Attention layer is built lazily in forward() once the output spatial\n",
    "        # size is known (FcaLayer needs width/height at construction time).\n",
    "        self.fac = None\n",
    "        # Shrinks the untouched last split when stride > 1 halved the others.\n",
    "        self.pool = nn.AvgPool2d(kernel_size=2)\n",
    "\n",
    "    def forward(self, x):\n",
    "        \"\"\"Run the block; input and output are (B, C, H, W) feature maps.\"\"\"\n",
    "        identity = x\n",
    "\n",
    "        if self.downsample is not None:\n",
    "            identity = self.downsample(identity)\n",
    "\n",
    "        out = self.conv1(x)\n",
    "        out = self.bn1(out)\n",
    "        out = self.relu(out)\n",
    "\n",
    "        # Split channels into `scales` equal groups for the Res2Net hierarchy.\n",
    "        x_scales = torch.chunk(out, self.scales, 1)\n",
    "        #print(list(map(show_shape, x_scales)))\n",
    "        #print(x_scales[4])\n",
    "        # for index, i in enumerate(x_scales):\n",
    "        #     # for index_p, p in enumerate(i):\n",
    "        #     #     print(index, index_p)\n",
    "        #     print(index, i.shape)\n",
    "\n",
    "        \n",
    "        for i in range(self.scales - 1):\n",
    "            if i == 0:\n",
    "                y_scale = x_scales[i]\n",
    "                #print(0, y_scale.shape)\n",
    "            else:\n",
    "                #print(y_scale.shape, x_scales[i].shape)\n",
    "                # NOTE(review): the running feature added here is the PRE-conv\n",
    "                # sum, not the previous split's conv output as in the Res2Net\n",
    "                # paper -- presumably to keep shapes aligned when stride != 1.\n",
    "                # TODO confirm this is intentional.\n",
    "                y_scale = y_scale + x_scales[i]\n",
    "                \n",
    "            # The first assignment below is a dead store (overwritten next line).\n",
    "            y_scale_reshape = y_scale\n",
    "            y_scale_reshape = self.conv2[i](y_scale)\n",
    "            y_scale_reshape = self.relu(self.bn2[i](y_scale_reshape))\n",
    "            \n",
    "            if i == 0:\n",
    "                out = y_scale_reshape\n",
    "            else:\n",
    "                #print(out.shape, y_scale_reshape.shape, y_scale.shape)\n",
    "                out = torch.cat((out, y_scale_reshape), 1)\n",
    "\n",
    "\n",
    "\n",
    "        # Re-attach the untouched last split; average-pool it first if the\n",
    "        # strided convs shrank the other splits' spatial size.\n",
    "        if out.size()[2] != x_scales[self.scales - 1].size()[2]:\n",
    "            out = torch.cat((out, self.pool(x_scales[self.scales - 1])), 1)\n",
    "        elif self.scales != 1:\n",
    "            out = torch.cat((out, x_scales[self.scales - 1]), 1)\n",
    "            \n",
    "\n",
    "        out = self.conv3(out)\n",
    "        out = self.bn3(out)\n",
    "\n",
    "        #print(out.size())\n",
    "\n",
    "\n",
    "        #out = FcaLayer(channel, 16, width, height) \n",
    "        # Lazily build the attention layer on first use, now that the output\n",
    "        # spatial size is known.\n",
    "        # NOTE(review): these parameters are created AFTER the optimizer was\n",
    "        # constructed, so the FcaLayer MLP weights are never updated by SGD --\n",
    "        # confirm this is intended.\n",
    "        if self.fac is None:\n",
    "            channel = out.size(1)\n",
    "            width = out.size(2)\n",
    "            height = out.size(3)\n",
    "            self.fac = FcaLayer(channel, 16, width, height) \n",
    "        out = self.fac(out)\n",
    "        \n",
    "\n",
    "\n",
    "        out += identity\n",
    "        out = self.relu(out)\n",
    "\n",
    "        return out\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 292,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Res2Net(nn.Module):\n",
    "    \"\"\"Res2Net backbone: stem conv -> four bottleneck stages -> avgpool -> fc.\"\"\"\n",
    "\n",
    "    def __init__(self, block, layers, num_classes=num_class, scales=4, groups=1, se=True):\n",
    "        super(Res2Net, self).__init__()\n",
    "        self.inplanes = 64\n",
    "\n",
    "        # Stem: 7x7 stride-2 conv + BN + ReLU, then 3x3 stride-2 max-pool.\n",
    "        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)\n",
    "        self.bn1 = nn.BatchNorm2d(self.inplanes)\n",
    "        self.relu = nn.ReLU(inplace=True)\n",
    "        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n",
    "\n",
    "        # Four stages; only the first keeps stride 1. Built in order because\n",
    "        # _make_layer mutates self.inplanes between calls.\n",
    "        stage_planes = (64, 128, 256, 512)\n",
    "        stage_strides = (1, 2, 2, 2)\n",
    "        stages = [self._make_layer(block, planes, blocks, stride=stride,\n",
    "                                   scales=scales, groups=groups, se=se)\n",
    "                  for planes, blocks, stride in zip(stage_planes, layers, stage_strides)]\n",
    "        self.layer1, self.layer2, self.layer3, self.layer4 = stages\n",
    "\n",
    "        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n",
    "        self.fc = nn.Linear(512 * block.expansion, num_classes)\n",
    "\n",
    "        # He initialisation for convs; BatchNorm starts as the identity map.\n",
    "        for module in self.modules():\n",
    "            if isinstance(module, nn.Conv2d):\n",
    "                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')\n",
    "            elif isinstance(module, nn.BatchNorm2d):\n",
    "                nn.init.constant_(module.weight, 1)\n",
    "                nn.init.constant_(module.bias, 0)\n",
    "\n",
    "    def _make_layer(self, block, planes, layer, stride=1, scales=4, groups=1, se=True):\n",
    "        \"\"\"Build one stage of `layer` blocks; only the first may downsample.\"\"\"\n",
    "        downsample = None\n",
    "        if stride != 1 or self.inplanes != planes * block.expansion:\n",
    "            # Projection shortcut when spatial size or channel count changes.\n",
    "            downsample = nn.Sequential(\n",
    "                nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),\n",
    "                nn.BatchNorm2d(planes * block.expansion),\n",
    "            )\n",
    "\n",
    "        stage = [block(self.inplanes, planes, stride=stride, downsample=downsample,\n",
    "                       scales=scales, groups=groups, se=se)]\n",
    "        self.inplanes = planes * block.expansion\n",
    "        stage.extend(block(self.inplanes, planes, scales=scales, groups=groups, se=se)\n",
    "                     for _ in range(1, layer))\n",
    "        return nn.Sequential(*stage)\n",
    "\n",
    "    def forward(self, x):\n",
    "        out = self.maxpool(self.relu(self.bn1(self.conv1(x))))\n",
    "        out = self.layer4(self.layer3(self.layer2(self.layer1(out))))\n",
    "        out = self.avgpool(out)\n",
    "        out = torch.flatten(out, 1)\n",
    "        return self.fc(out)\n",
    "\n",
    "\n",
    "def res2net50_se(num_classes=1000, scales=4, groups=1, se=True):\n",
    "    \"\"\"Res2Net-50 (3-4-6-3 bottlenecks) with frequency channel attention.\n",
    "\n",
    "    Fix: `se` was hard-coded to True; it is now a parameter (default True,\n",
    "    so existing callers are unaffected), matching res2net18_fca's signature.\n",
    "    \"\"\"\n",
    "    return Res2Net(Bottleneck, [3, 4, 6, 3], num_classes, scales, groups, se=se)\n",
    "\n",
    "def res2net18_fca(num_classes=1000, scales=4, groups=1, se=True):\n",
    "    \"\"\"Shallow Res2Net (2-2-2-2 bottlenecks) with frequency channel attention.\"\"\"\n",
    "    return Res2Net(Bottleneck, [2, 2, 2, 2], num_classes, scales, groups, se)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 293,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cuda:0\n"
     ]
    }
   ],
   "source": [
    "# Hyper-parameter settings\n",
    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
    "learning_rate=0.05\n",
    "momentum=0.9\n",
    "weight_decay=0.0001\n",
    "# NOTE(review): re-defines batch_size, but the loaders above were built with\n",
    "# 32 -- this value is unused by them; confirm which batch size is intended.\n",
    "batch_size=128\n",
    "epochs=25\n",
    "data_path='./data'\n",
    "model_name='Res2net_FcaNet'+'epoch='+str(epochs)\n",
    "print(device)\n",
    "model= res2net18_fca().to(device)\n",
    "criterion=nn.CrossEntropyLoss()\n",
    "optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum, weight_decay=weight_decay)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 294,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch:0,train_loss:4.3288,train_acc:9.3520,train_top5_acc:28.4200,test_loss:3.6440,test_acc:15.2100,test_top5_acc:40.0100\n",
      "epoch:1,train_loss:3.4581,train_acc:17.5280,train_top5_acc:43.6680,test_loss:5.1246,test_acc:20.5500,test_top5_acc:48.4500\n",
      "epoch:2,train_loss:3.1468,train_acc:22.7740,train_top5_acc:52.0160,test_loss:3.9330,test_acc:26.5400,test_top5_acc:55.2800\n",
      "epoch:3,train_loss:2.9751,train_acc:26.2800,train_top5_acc:56.2820,test_loss:3.5689,test_acc:27.9000,test_top5_acc:57.0200\n",
      "epoch:4,train_loss:2.7280,train_acc:30.7820,train_top5_acc:62.2180,test_loss:3.3654,test_acc:30.6500,test_top5_acc:61.6300\n",
      "epoch:5,train_loss:2.4939,train_acc:35.3260,train_top5_acc:67.4240,test_loss:3.2566,test_acc:33.7500,test_top5_acc:63.3600\n",
      "epoch:6,train_loss:2.3136,train_acc:39.3280,train_top5_acc:71.4620,test_loss:3.1475,test_acc:35.5500,test_top5_acc:64.7700\n",
      "epoch:7,train_loss:2.1447,train_acc:42.8640,train_top5_acc:74.8880,test_loss:2.8323,test_acc:35.8100,test_top5_acc:65.3000\n",
      "epoch:8,train_loss:1.9484,train_acc:47.3300,train_top5_acc:78.5120,test_loss:2.6786,test_acc:37.1400,test_top5_acc:66.5800\n",
      "epoch:9,train_loss:1.7852,train_acc:50.8080,train_top5_acc:81.7760,test_loss:2.5836,test_acc:38.3900,test_top5_acc:67.9400\n",
      "epoch:10,train_loss:1.6283,train_acc:54.3740,train_top5_acc:84.7160,test_loss:2.7498,test_acc:37.6800,test_top5_acc:66.8700\n",
      "epoch:11,train_loss:1.4701,train_acc:57.7680,train_top5_acc:87.2720,test_loss:2.6186,test_acc:38.7900,test_top5_acc:67.8600\n",
      "epoch:12,train_loss:1.3291,train_acc:61.7060,train_top5_acc:89.5120,test_loss:2.6640,test_acc:38.7300,test_top5_acc:68.2300\n",
      "epoch:13,train_loss:1.2082,train_acc:64.5800,train_top5_acc:91.4960,test_loss:2.7170,test_acc:38.0000,test_top5_acc:67.9100\n",
      "epoch:14,train_loss:1.0736,train_acc:68.1040,train_top5_acc:93.2740,test_loss:2.7676,test_acc:39.2100,test_top5_acc:67.2800\n",
      "epoch:15,train_loss:0.9681,train_acc:71.0020,train_top5_acc:94.5540,test_loss:2.8590,test_acc:38.4000,test_top5_acc:67.6600\n",
      "epoch:16,train_loss:0.8813,train_acc:73.1940,train_top5_acc:95.6360,test_loss:2.8381,test_acc:39.3700,test_top5_acc:67.9200\n",
      "epoch:17,train_loss:0.7872,train_acc:75.9980,train_top5_acc:96.5300,test_loss:2.9282,test_acc:39.1900,test_top5_acc:67.1200\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[294], line 5\u001b[0m\n\u001b[1;32m      3\u001b[0m train_top5_accs, test_top5_accs \u001b[38;5;241m=\u001b[39m [], []\n\u001b[1;32m      4\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m epoch \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(epochs):\n\u001b[0;32m----> 5\u001b[0m     train_loss,train_acc,train_top5_acc\u001b[38;5;241m=\u001b[39m\u001b[43mnew_train\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\u001b[43mtrainloader\u001b[49m\u001b[43m,\u001b[49m\u001b[43mcriterion\u001b[49m\u001b[43m,\u001b[49m\u001b[43moptimizer\u001b[49m\u001b[43m,\u001b[49m\u001b[43mdevice\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m      6\u001b[0m     test_loss,test_acc,test_top5_acc\u001b[38;5;241m=\u001b[39mtest(model,testloader,criterion,device)\n\u001b[1;32m      7\u001b[0m     \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mepoch:\u001b[39m\u001b[38;5;132;01m{}\u001b[39;00m\u001b[38;5;124m,train_loss:\u001b[39m\u001b[38;5;132;01m{:.4f}\u001b[39;00m\u001b[38;5;124m,train_acc:\u001b[39m\u001b[38;5;132;01m{:.4f}\u001b[39;00m\u001b[38;5;124m,train_top5_acc:\u001b[39m\u001b[38;5;132;01m{:.4f}\u001b[39;00m\u001b[38;5;124m,test_loss:\u001b[39m\u001b[38;5;132;01m{:.4f}\u001b[39;00m\u001b[38;5;124m,test_acc:\u001b[39m\u001b[38;5;132;01m{:.4f}\u001b[39;00m\u001b[38;5;124m,test_top5_acc:\u001b[39m\u001b[38;5;132;01m{:.4f}\u001b[39;00m\u001b[38;5;124m'\u001b[39m\u001b[38;5;241m.\u001b[39mformat(epoch,train_loss,train_acc,train_top5_acc,test_loss,test_acc,test_top5_acc))\n",
      "File \u001b[0;32m~/deep-learning/FuTong/useful_tools.py:264\u001b[0m, in \u001b[0;36mnew_train\u001b[0;34m(model, trainloader, criterion, optimizer, device)\u001b[0m\n\u001b[1;32m    262\u001b[0m output\u001b[38;5;241m=\u001b[39mmodel(data)\n\u001b[1;32m    263\u001b[0m loss\u001b[38;5;241m=\u001b[39mcriterion(output,target)\n\u001b[0;32m--> 264\u001b[0m \u001b[43mloss\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbackward\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    265\u001b[0m optimizer\u001b[38;5;241m.\u001b[39mstep()\n\u001b[1;32m    266\u001b[0m train_loss\u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39mloss\u001b[38;5;241m.\u001b[39mitem()\n",
      "File \u001b[0;32m~/miniconda3/lib/python3.10/site-packages/torch/_tensor.py:487\u001b[0m, in \u001b[0;36mTensor.backward\u001b[0;34m(self, gradient, retain_graph, create_graph, inputs)\u001b[0m\n\u001b[1;32m    477\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m has_torch_function_unary(\u001b[38;5;28mself\u001b[39m):\n\u001b[1;32m    478\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m handle_torch_function(\n\u001b[1;32m    479\u001b[0m         Tensor\u001b[38;5;241m.\u001b[39mbackward,\n\u001b[1;32m    480\u001b[0m         (\u001b[38;5;28mself\u001b[39m,),\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m    485\u001b[0m         inputs\u001b[38;5;241m=\u001b[39minputs,\n\u001b[1;32m    486\u001b[0m     )\n\u001b[0;32m--> 487\u001b[0m \u001b[43mtorch\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mautograd\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbackward\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m    488\u001b[0m \u001b[43m    \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mgradient\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mretain_graph\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcreate_graph\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43minputs\u001b[49m\n\u001b[1;32m    489\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/miniconda3/lib/python3.10/site-packages/torch/autograd/__init__.py:200\u001b[0m, in \u001b[0;36mbackward\u001b[0;34m(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)\u001b[0m\n\u001b[1;32m    195\u001b[0m     retain_graph \u001b[38;5;241m=\u001b[39m create_graph\n\u001b[1;32m    197\u001b[0m \u001b[38;5;66;03m# The reason we repeat same the comment below is that\u001b[39;00m\n\u001b[1;32m    198\u001b[0m \u001b[38;5;66;03m# some Python versions print out the first line of a multi-line function\u001b[39;00m\n\u001b[1;32m    199\u001b[0m \u001b[38;5;66;03m# calls in the traceback and some print out the last line\u001b[39;00m\n\u001b[0;32m--> 200\u001b[0m \u001b[43mVariable\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_execution_engine\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun_backward\u001b[49m\u001b[43m(\u001b[49m\u001b[43m  \u001b[49m\u001b[38;5;66;43;03m# Calls into the C++ engine to run the backward pass\u001b[39;49;00m\n\u001b[1;32m    201\u001b[0m \u001b[43m    \u001b[49m\u001b[43mtensors\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mgrad_tensors_\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mretain_graph\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcreate_graph\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    202\u001b[0m \u001b[43m    \u001b[49m\u001b[43mallow_unreachable\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43maccumulate_grad\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m)\u001b[49m\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "# Per-epoch metric history for plotting and logging.\n",
    "train_losses, test_losses = [], []\n",
    "train_accs, test_accs = [], []\n",
    "train_top5_accs, test_top5_accs = [], []\n",
    "for epoch in range(epochs):\n",
    "    train_loss,train_acc,train_top5_acc=new_train(model,trainloader,criterion,optimizer,device)\n",
    "    test_loss,test_acc,test_top5_acc=test(model,testloader,criterion,device)\n",
    "    print('epoch:{},train_loss:{:.4f},train_acc:{:.4f},train_top5_acc:{:.4f},test_loss:{:.4f},test_acc:{:.4f},test_top5_acc:{:.4f}'.format(epoch,train_loss,train_acc,train_top5_acc,test_loss,test_acc,test_top5_acc))\n",
    "\n",
    "    train_losses.append(train_loss)\n",
    "    test_losses.append(test_loss)\n",
    "    train_accs.append(train_acc)\n",
    "    test_accs.append(test_acc)\n",
    "    train_top5_accs.append(train_top5_acc)\n",
    "    test_top5_accs.append(test_top5_acc)\n",
    "\n",
    "# Save weights, then plot and log the training curves.\n",
    "torch.save(model.state_dict(), model_name+'.pth')\n",
    "# Bug fix: train_accs was passed twice and train_top5_accs (collected above)\n",
    "# was never plotted; pass the top-5 history as the third argument, matching\n",
    "# the argument order used by save_log below.\n",
    "plot_training_results(train_losses, train_accs, train_top5_accs, test_losses, test_accs, test_top5_accs)\n",
    "save_log(model_name,train_accs,test_accs,train_losses,test_losses,train_top5_accs,test_top5_accs,output_dir='../train_process')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
