{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "import torchvision\n",
    "import torchvision.models as models\n",
    "import torchvision.transforms as transforms\n",
    "from torchvision.transforms import RandAugment\n",
    "from torch.optim import lr_scheduler\n",
    "import numpy as np\n",
    "import time\n",
    "import copy\n",
    "import matplotlib.pyplot as plt\n",
    "from CcNet import *\n",
    "from utils import *"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cuda:0\n",
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n"
     ]
    }
   ],
   "source": [
     "# Select the first GPU if available, otherwise fall back to CPU.\n",
     "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
     "print(device)\n",
     " ## Build the model (CcNet, imported via `from CcNet import *`) and move it to the device.\n",
     "net = CcNet().to(device)\n",
     "num_epochs=50\n",
     " # Define the data loaders, loss function, and optimizer.\n",
     "trainloader,testloader = get_data_loader()\n",
     "criterion = nn.CrossEntropyLoss()\n",
     "optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)\n",
     "# NOTE(review): step_size=50 equals num_epochs, so if train_model steps the\n",
     "# scheduler once per epoch the learning rate never actually decays during this\n",
     "# run -- confirm this is intentional.\n",
     "scheduler = lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.1)\n",
     "   "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第 0/49 轮训练\n",
      "----------\n",
      "train 损失: 3.9271 Top-1 准确率: 0.0998 Top-5 准确率: 0.2950\n",
      "val 损失: 3.8046 Top-1 准确率: 0.1280 Top-5 准确率: 0.3554\n",
      "\n",
      "第 1/49 轮训练\n",
      "----------\n",
      "train 损失: 3.1399 Top-1 准确率: 0.2223 Top-5 准确率: 0.5177\n",
      "val 损失: 3.1683 Top-1 准确率: 0.2260 Top-5 准确率: 0.5332\n",
      "\n",
      "第 2/49 轮训练\n",
      "----------\n",
      "train 损失: 2.6582 Top-1 准确率: 0.3133 Top-5 准确率: 0.6365\n",
      "val 损失: 2.8908 Top-1 准确率: 0.2845 Top-5 准确率: 0.6040\n",
      "\n",
      "第 3/49 轮训练\n",
      "----------\n",
      "train 损失: 2.3432 Top-1 准确率: 0.3799 Top-5 准确率: 0.7084\n",
      "val 损失: 2.5964 Top-1 准确率: 0.3521 Top-5 准确率: 0.6676\n",
      "\n",
      "第 4/49 轮训练\n",
      "----------\n",
      "train 损失: 2.1268 Top-1 准确率: 0.4300 Top-5 准确率: 0.7529\n",
      "val 损失: 2.2671 Top-1 准确率: 0.4019 Top-5 准确率: 0.7169\n",
      "\n",
      "第 5/49 轮训练\n",
      "----------\n",
      "train 损失: 1.9606 Top-1 准确率: 0.4683 Top-5 准确率: 0.7815\n",
      "val 损失: 2.2383 Top-1 准确率: 0.4173 Top-5 准确率: 0.7417\n",
      "\n",
      "第 6/49 轮训练\n",
      "----------\n",
      "train 损失: 1.8174 Top-1 准确率: 0.4989 Top-5 准确率: 0.8104\n",
      "val 损失: 2.2668 Top-1 准确率: 0.4264 Top-5 准确率: 0.7440\n",
      "\n",
      "第 7/49 轮训练\n",
      "----------\n",
      "train 损失: 1.7006 Top-1 准确率: 0.5266 Top-5 准确率: 0.8301\n",
      "val 损失: 2.0729 Top-1 准确率: 0.4468 Top-5 准确率: 0.7673\n",
      "\n",
      "第 8/49 轮训练\n",
      "----------\n",
      "train 损失: 1.6094 Top-1 准确率: 0.5513 Top-5 准确率: 0.8448\n",
      "val 损失: 2.0253 Top-1 准确率: 0.4890 Top-5 准确率: 0.7875\n",
      "\n",
      "第 9/49 轮训练\n",
      "----------\n",
      "train 损失: 1.5239 Top-1 准确率: 0.5722 Top-5 准确率: 0.8581\n",
      "val 损失: 1.9131 Top-1 准确率: 0.4985 Top-5 准确率: 0.8017\n",
      "\n",
      "第 10/49 轮训练\n",
      "----------\n",
      "train 损失: 1.4494 Top-1 准确率: 0.5922 Top-5 准确率: 0.8698\n",
      "val 损失: 1.8368 Top-1 准确率: 0.5209 Top-5 准确率: 0.8091\n",
      "\n",
      "第 11/49 轮训练\n",
      "----------\n",
      "train 损失: 1.3858 Top-1 准确率: 0.6077 Top-5 准确率: 0.8803\n",
      "val 损失: 1.7342 Top-1 准确率: 0.5290 Top-5 准确率: 0.8295\n",
      "\n",
      "第 12/49 轮训练\n",
      "----------\n",
      "train 损失: 1.3224 Top-1 准确率: 0.6236 Top-5 准确率: 0.8879\n",
      "val 损失: 2.0284 Top-1 准确率: 0.4931 Top-5 准确率: 0.7912\n",
      "\n",
      "第 13/49 轮训练\n",
      "----------\n",
      "train 损失: 1.2706 Top-1 准确率: 0.6355 Top-5 准确率: 0.8950\n",
      "val 损失: 1.5732 Top-1 准确率: 0.5710 Top-5 准确率: 0.8492\n",
      "\n",
      "第 14/49 轮训练\n",
      "----------\n",
      "train 损失: 1.2153 Top-1 准确率: 0.6517 Top-5 准确率: 0.9040\n",
      "val 损失: 1.4638 Top-1 准确率: 0.5975 Top-5 准确率: 0.8665\n",
      "\n",
      "第 15/49 轮训练\n",
      "----------\n",
      "train 损失: 1.1719 Top-1 准确率: 0.6628 Top-5 准确率: 0.9088\n",
      "val 损失: 1.7867 Top-1 准确率: 0.5457 Top-5 准确率: 0.8299\n",
      "\n",
      "第 16/49 轮训练\n",
      "----------\n",
      "train 损失: 1.1259 Top-1 准确率: 0.6751 Top-5 准确率: 0.9157\n",
      "val 损失: 1.6030 Top-1 准确率: 0.5819 Top-5 准确率: 0.8587\n",
      "\n",
      "第 17/49 轮训练\n",
      "----------\n",
      "train 损失: 1.0919 Top-1 准确率: 0.6831 Top-5 准确率: 0.9199\n",
      "val 损失: 1.6180 Top-1 准确率: 0.5713 Top-5 准确率: 0.8505\n",
      "\n",
      "第 18/49 轮训练\n",
      "----------\n",
      "train 损失: 1.0521 Top-1 准确率: 0.6949 Top-5 准确率: 0.9242\n",
      "val 损失: 1.4936 Top-1 准确率: 0.5936 Top-5 准确率: 0.8634\n",
      "\n",
      "第 19/49 轮训练\n",
      "----------\n",
      "train 损失: 1.0235 Top-1 准确率: 0.7009 Top-5 准确率: 0.9271\n",
      "val 损失: 1.6491 Top-1 准确率: 0.5664 Top-5 准确率: 0.8479\n",
      "\n",
      "第 20/49 轮训练\n",
      "----------\n",
      "train 损失: 0.9754 Top-1 准确率: 0.7163 Top-5 准确率: 0.9336\n",
      "val 损失: 1.3899 Top-1 准确率: 0.6161 Top-5 准确率: 0.8807\n",
      "\n",
      "第 21/49 轮训练\n",
      "----------\n",
      "train 损失: 0.9560 Top-1 准确率: 0.7228 Top-5 准确率: 0.9359\n",
      "val 损失: 1.5308 Top-1 准确率: 0.5957 Top-5 准确率: 0.8658\n",
      "\n",
      "第 22/49 轮训练\n",
      "----------\n",
      "train 损失: 0.9189 Top-1 准确率: 0.7306 Top-5 准确率: 0.9416\n",
      "val 损失: 1.3468 Top-1 准确率: 0.6346 Top-5 准确率: 0.8831\n",
      "\n",
      "第 23/49 轮训练\n",
      "----------\n",
      "train 损失: 0.8834 Top-1 准确率: 0.7421 Top-5 准确率: 0.9443\n",
      "val 损失: 1.6525 Top-1 准确率: 0.5845 Top-5 准确率: 0.8552\n",
      "\n",
      "第 24/49 轮训练\n",
      "----------\n",
      "train 损失: 0.8678 Top-1 准确率: 0.7455 Top-5 准确率: 0.9453\n",
      "val 损失: 1.4692 Top-1 准确率: 0.6139 Top-5 准确率: 0.8714\n",
      "\n",
      "第 25/49 轮训练\n",
      "----------\n",
      "train 损失: 0.8314 Top-1 准确率: 0.7550 Top-5 准确率: 0.9500\n",
      "val 损失: 1.6053 Top-1 准确率: 0.5981 Top-5 准确率: 0.8607\n",
      "\n",
      "第 26/49 轮训练\n",
      "----------\n",
      "train 损失: 0.8113 Top-1 准确率: 0.7615 Top-5 准确率: 0.9529\n",
      "val 损失: 1.3701 Top-1 准确率: 0.6318 Top-5 准确率: 0.8886\n",
      "\n",
      "第 27/49 轮训练\n",
      "----------\n",
      "train 损失: 0.7774 Top-1 准确率: 0.7703 Top-5 准确率: 0.9562\n",
      "val 损失: 1.4330 Top-1 准确率: 0.6256 Top-5 准确率: 0.8795\n",
      "\n",
      "第 28/49 轮训练\n",
      "----------\n",
      "train 损失: 0.7539 Top-1 准确率: 0.7771 Top-5 准确率: 0.9583\n",
      "val 损失: 1.4979 Top-1 准确率: 0.6134 Top-5 准确率: 0.8734\n",
      "\n",
      "第 29/49 轮训练\n",
      "----------\n",
      "train 损失: 0.7250 Top-1 准确率: 0.7842 Top-5 准确率: 0.9629\n",
      "val 损失: 1.2856 Top-1 准确率: 0.6469 Top-5 准确率: 0.8951\n",
      "\n",
      "第 30/49 轮训练\n",
      "----------\n",
      "train 损失: 0.7009 Top-1 准确率: 0.7923 Top-5 准确率: 0.9649\n",
      "val 损失: 1.3782 Top-1 准确率: 0.6412 Top-5 准确率: 0.8782\n",
      "\n",
      "第 31/49 轮训练\n",
      "----------\n",
      "train 损失: 0.6874 Top-1 准确率: 0.7957 Top-5 准确率: 0.9656\n",
      "val 损失: 1.4492 Top-1 准确率: 0.6271 Top-5 准确率: 0.8786\n",
      "\n",
      "第 32/49 轮训练\n",
      "----------\n",
      "train 损失: 0.6578 Top-1 准确率: 0.8068 Top-5 准确率: 0.9683\n",
      "val 损失: 1.3906 Top-1 准确率: 0.6453 Top-5 准确率: 0.8860\n",
      "\n",
      "第 33/49 轮训练\n",
      "----------\n",
      "train 损失: 0.6417 Top-1 准确率: 0.8103 Top-5 准确率: 0.9704\n",
      "val 损失: 1.5001 Top-1 准确率: 0.6188 Top-5 准确率: 0.8772\n",
      "\n",
      "第 34/49 轮训练\n",
      "----------\n",
      "train 损失: 0.6267 Top-1 准确率: 0.8157 Top-5 准确率: 0.9710\n",
      "val 损失: 1.5899 Top-1 准确率: 0.6052 Top-5 准确率: 0.8659\n",
      "\n",
      "第 35/49 轮训练\n",
      "----------\n",
      "train 损失: 0.5948 Top-1 准确率: 0.8259 Top-5 准确率: 0.9730\n",
      "val 损失: 1.2887 Top-1 准确率: 0.6737 Top-5 准确率: 0.8993\n",
      "\n",
      "第 36/49 轮训练\n",
      "----------\n",
      "train 损失: 0.5702 Top-1 准确率: 0.8325 Top-5 准确率: 0.9759\n",
      "val 损失: 1.3042 Top-1 准确率: 0.6646 Top-5 准确率: 0.8970\n",
      "\n",
      "第 37/49 轮训练\n",
      "----------\n",
      "train 损失: 0.5644 Top-1 准确率: 0.8342 Top-5 准确率: 0.9768\n",
      "val 损失: 1.3273 Top-1 准确率: 0.6577 Top-5 准确率: 0.8961\n",
      "\n",
      "第 38/49 轮训练\n",
      "----------\n",
      "train 损失: 0.5432 Top-1 准确率: 0.8394 Top-5 准确率: 0.9791\n",
      "val 损失: 1.3078 Top-1 准确率: 0.6665 Top-5 准确率: 0.8983\n",
      "\n",
      "第 39/49 轮训练\n",
      "----------\n",
      "train 损失: 0.5275 Top-1 准确率: 0.8446 Top-5 准确率: 0.9796\n",
      "val 损失: 1.2635 Top-1 准确率: 0.6721 Top-5 准确率: 0.9015\n",
      "\n",
      "第 40/49 轮训练\n",
      "----------\n",
      "train 损失: 0.5085 Top-1 准确率: 0.8520 Top-5 准确率: 0.9815\n",
      "val 损失: 1.3268 Top-1 准确率: 0.6623 Top-5 准确率: 0.8976\n",
      "\n",
      "第 41/49 轮训练\n",
      "----------\n",
      "train 损失: 0.4994 Top-1 准确率: 0.8528 Top-5 准确率: 0.9821\n",
      "val 损失: 1.3717 Top-1 准确率: 0.6601 Top-5 准确率: 0.8932\n",
      "\n",
      "第 42/49 轮训练\n",
      "----------\n",
      "train 损失: 0.4815 Top-1 准确率: 0.8584 Top-5 准确率: 0.9834\n",
      "val 损失: 1.3050 Top-1 准确率: 0.6693 Top-5 准确率: 0.9007\n",
      "\n",
      "第 43/49 轮训练\n",
      "----------\n",
      "train 损失: 0.4610 Top-1 准确率: 0.8665 Top-5 准确率: 0.9833\n",
      "val 损失: 1.3050 Top-1 准确率: 0.6723 Top-5 准确率: 0.8992\n",
      "\n",
      "第 44/49 轮训练\n",
      "----------\n",
      "train 损失: 0.4530 Top-1 准确率: 0.8673 Top-5 准确率: 0.9847\n",
      "val 损失: 1.3504 Top-1 准确率: 0.6643 Top-5 准确率: 0.8947\n",
      "\n",
      "第 45/49 轮训练\n",
      "----------\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[3], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m model,(train_losses, train_top1_accs, train_top5_accs, val_losses, val_top1_accs, val_top5_accs) \u001b[38;5;241m=\u001b[39m \u001b[43mtrain_model\u001b[49m\u001b[43m(\u001b[49m\u001b[43mnet\u001b[49m\u001b[43m,\u001b[49m\u001b[43mtrainloader\u001b[49m\u001b[43m,\u001b[49m\u001b[43mdevice\u001b[49m\u001b[43m,\u001b[49m\u001b[43mtestloader\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcriterion\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43moptimizer\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mscheduler\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mnum_epochs\u001b[49m\u001b[43m)\u001b[49m  \n",
      "File \u001b[0;32m~/deep-learning/FuTong/发起总攻/utils.py:159\u001b[0m, in \u001b[0;36mtrain_model\u001b[0;34m(model, trainloader, device, testloader, criterion, optimizer, scheduler, num_epochs)\u001b[0m\n\u001b[1;32m    157\u001b[0m     \u001b[38;5;66;03m# 只有在训练阶段反向传播+优化\u001b[39;00m\n\u001b[1;32m    158\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m phase \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mtrain\u001b[39m\u001b[38;5;124m'\u001b[39m:\n\u001b[0;32m--> 159\u001b[0m         \u001b[43mloss\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbackward\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    160\u001b[0m         optimizer\u001b[38;5;241m.\u001b[39mstep()\n\u001b[1;32m    162\u001b[0m \u001b[38;5;66;03m# 统计\u001b[39;00m\n",
      "File \u001b[0;32m~/miniconda3/lib/python3.10/site-packages/torch/_tensor.py:487\u001b[0m, in \u001b[0;36mTensor.backward\u001b[0;34m(self, gradient, retain_graph, create_graph, inputs)\u001b[0m\n\u001b[1;32m    477\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m has_torch_function_unary(\u001b[38;5;28mself\u001b[39m):\n\u001b[1;32m    478\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m handle_torch_function(\n\u001b[1;32m    479\u001b[0m         Tensor\u001b[38;5;241m.\u001b[39mbackward,\n\u001b[1;32m    480\u001b[0m         (\u001b[38;5;28mself\u001b[39m,),\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m    485\u001b[0m         inputs\u001b[38;5;241m=\u001b[39minputs,\n\u001b[1;32m    486\u001b[0m     )\n\u001b[0;32m--> 487\u001b[0m \u001b[43mtorch\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mautograd\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbackward\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m    488\u001b[0m \u001b[43m    \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mgradient\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mretain_graph\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcreate_graph\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43minputs\u001b[49m\n\u001b[1;32m    489\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/miniconda3/lib/python3.10/site-packages/torch/autograd/__init__.py:200\u001b[0m, in \u001b[0;36mbackward\u001b[0;34m(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)\u001b[0m\n\u001b[1;32m    195\u001b[0m     retain_graph \u001b[38;5;241m=\u001b[39m create_graph\n\u001b[1;32m    197\u001b[0m \u001b[38;5;66;03m# The reason we repeat same the comment below is that\u001b[39;00m\n\u001b[1;32m    198\u001b[0m \u001b[38;5;66;03m# some Python versions print out the first line of a multi-line function\u001b[39;00m\n\u001b[1;32m    199\u001b[0m \u001b[38;5;66;03m# calls in the traceback and some print out the last line\u001b[39;00m\n\u001b[0;32m--> 200\u001b[0m \u001b[43mVariable\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_execution_engine\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun_backward\u001b[49m\u001b[43m(\u001b[49m\u001b[43m  \u001b[49m\u001b[38;5;66;43;03m# Calls into the C++ engine to run the backward pass\u001b[39;49;00m\n\u001b[1;32m    201\u001b[0m \u001b[43m    \u001b[49m\u001b[43mtensors\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mgrad_tensors_\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mretain_graph\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcreate_graph\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    202\u001b[0m \u001b[43m    \u001b[49m\u001b[43mallow_unreachable\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43maccumulate_grad\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m)\u001b[49m\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
     "# Run the full training loop (train_model is defined in utils.py). It returns the\n",
     "# trained model plus per-epoch histories: loss, top-1 and top-5 accuracy for both\n",
     "# the train and val phases, as printed in the cell output above.\n",
     "# NOTE(review): the accuracy histories appear to hold GPU tensors (they are passed\n",
     "# through to_cpu() before plotting below) -- confirm against utils.train_model.\n",
     "model,(train_losses, train_top1_accs, train_top5_accs, val_losses, val_top1_accs, val_top5_accs) = train_model(net,trainloader,device,testloader, criterion, optimizer, scheduler, num_epochs)  \n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def to_cpu_numpy(tensor):\n",
    "    return tensor.cpu().numpy()\n",
    "def to_cpu(list_):\n",
    "    if isinstance(list_, list):\n",
    "        return list(map(to_cpu_numpy,list_))\n",
    "    else:\n",
    "        return to_cpu_numpy(list_)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Plot the training curves (plot_training_results comes from utils). The accuracy\n",
     "# histories are converted to CPU/NumPy first -- presumably they are CUDA tensors --\n",
     "# while the loss histories are passed through as-is; verify against train_model.\n",
     "plot_training_results(train_losses, to_cpu(train_top1_accs), to_cpu(train_top5_accs), val_losses, to_cpu(val_top1_accs), to_cpu(val_top5_accs))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
