{
 "cells": [
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-05-02T08:14:22.904337Z",
     "start_time": "2024-05-02T08:14:22.897469Z"
    }
   },
   "source": [
     "# Imports (all third-party dependencies used by the notebook)\n",
     "import torch\n",
     "import torch.nn as nn\n",
     "import torch.nn.functional as F\n",
     "import torchvision\n",
     "import torch.optim as optim   # torch.optim provides the optimizers, including SGD\n",
     "from torchvision.datasets import ImageFolder\n",
     "from torchvision import transforms\n",
     "from torch.utils.data import DataLoader\n",
     "import numpy as np\n",
     "import pandas as pd\n",
     "import os\n",
     "import random"
   ],
   "outputs": [],
   "execution_count": 7
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-05-02T08:14:22.941433Z",
     "start_time": "2024-05-02T08:14:22.934597Z"
    }
   },
   "source": [
     "# Configuration: paths and hyperparameters used throughout the notebook\n",
     "\n",
     "train_path = './dataset2/traindata_resized/'  # training images (pre-resized; presumably 224x224 -- TODO confirm, the model's fc layer assumes it)\n",
     "test_path = './dataset2/testdata/'  # NOTE(review): overwritten later with './dataset3/testdata/' -- verify which is intended\n",
     "rate = 0.8  # train/validation split ratio (not used in the visible cells)\n",
     "n_classes = 48  # number of output classes of the model\n",
     "batch = 32  # batch size for both DataLoaders"
   ],
   "outputs": [],
   "execution_count": 8
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-05-02T08:14:22.998050Z",
     "start_time": "2024-05-02T08:14:22.958040Z"
    }
   },
   "source": [
     "# Training transform pipeline and DataLoader\n",
     "data_transform = transforms.Compose([  # Compose chains several transforms together\n",
     "    # transforms.RandomResizedCrop(size=(72,72)),  # random crop to the given size (disabled)\n",
     "    transforms.ToTensor(),  # PIL image -> Tensor; scales pixel values from [0,255] to [0,1]\n",
     "    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])  # normalize to roughly [-1,1]\n",
     "])\n",
     "# ImageFolder infers class labels from the per-class subdirectory names under train_path.\n",
     "# NOTE(review): no Resize is applied, so every image in traindata_resized must already\n",
     "# share one size (224x224 per the model's fc layer) -- confirm\n",
     "train_dataset = ImageFolder(train_path, transform=data_transform)\n",
     "train_loader = DataLoader(dataset=train_dataset, batch_size = batch, shuffle=True, num_workers=4)\n"
   ],
   "outputs": [],
   "execution_count": 9
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-05-02T08:14:23.015648Z",
     "start_time": "2024-05-02T08:14:23.001026Z"
    }
   },
   "source": [
    "class Vgg16_net(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(Vgg16_net, self).__init__()\n",
    " \n",
    "        self.layer1=nn.Sequential(\n",
    "            nn.Conv2d(in_channels=3,out_channels=64,kernel_size=3,stride=1,padding=1), #(224-3+2)/1+1=224   224*224*64\n",
    "            nn.BatchNorm2d(64),\n",
    "            #inplace-选择是否进行覆盖运算\n",
    "            #意思是是否将计算得到的值覆盖之前的值，比如\n",
    "            nn.ReLU(inplace=True),\n",
    "            #意思就是对从上层网络Conv2d中传递下来的tensor直接进行修改，\n",
    "            #这样能够节省运算内存，不用多存储其他变量\n",
    " \n",
    "            nn.Conv2d(in_channels=64,out_channels=64,kernel_size=3,stride=1,padding=1), #(224-3+2)/1+1=224    224*224*64\n",
    "            #Batch Normalization强行将数据拉回到均值为0，方差为1的正太分布上，\n",
    "            # 一方面使得数据分布一致，另一方面避免梯度消失。\n",
    "            nn.BatchNorm2d(64),\n",
    "            nn.ReLU(inplace=True),\n",
    " \n",
    "            nn.MaxPool2d(kernel_size=2,stride=2)   #(224-2)/2+1=112         112*112*64\n",
    "        )\n",
    " \n",
    " \n",
    "        self.layer2=nn.Sequential(\n",
    "            nn.Conv2d(in_channels=64,out_channels=128,kernel_size=3,stride=1,padding=1),  #(112-3+2)/1+1=112 112*112*128\n",
    "            nn.BatchNorm2d(128),\n",
    "            nn.ReLU(inplace=True),\n",
    " \n",
    "            nn.Conv2d(in_channels=128,out_channels=128,kernel_size=3,stride=1,padding=1), #(112-3+2)/1+1=112 112*112*128\n",
    "            nn.BatchNorm2d(128),\n",
    "            nn.ReLU(inplace=True),\n",
    " \n",
    "            nn.MaxPool2d(2,2)    #(112-2)/2+1=56     56*56*128\n",
    "        )\n",
    " \n",
    "        # self.layer3=nn.Sequential(\n",
    "        #     nn.Conv2d(in_channels=128,out_channels=256,kernel_size=3,stride=1,padding=1),  #(56-3+2)/1+1=56   56*56*256\n",
    "        #     nn.BatchNorm2d(256),\n",
    "        #     nn.ReLU(inplace=True),\n",
    " \n",
    "        #     nn.Conv2d(in_channels=256,out_channels=256,kernel_size=3,stride=1,padding=1),  #(56-3+2)/1+1=56   56*56*256\n",
    "        #     nn.BatchNorm2d(256),\n",
    "        #     nn.ReLU(inplace=True),\n",
    " \n",
    "        #     nn.MaxPool2d(2,2)     #(56-2)/2+1=28     28*28*256\n",
    "        # )\n",
    "\n",
    "        self.conv=nn.Sequential(\n",
    "            self.layer1,\n",
    "            self.layer2,\n",
    "            # self.layer3,\n",
    "            # self.layer4\n",
    "            # self.layer5\n",
    "        )\n",
    " \n",
    "        self.fc=nn.Sequential(\n",
    "            #y=xA^T+b  x是输入,A是权值,b是偏执,y是输出\n",
    "            #nn.Liner(in_features,out_features,bias)\n",
    "            #in_features:输入x的列数  输入数据:[batchsize,in_features]\n",
    "            #out_freatures:线性变换后输出的y的列数,输出数据的大小是:[batchsize,out_features]\n",
    "            #bias: bool  默认为True\n",
    "            #线性变换不改变输入矩阵x的行数,仅改变列数\n",
    "            nn.Linear(56*56*128,1024),\n",
    "            nn.ReLU(inplace=True),\n",
    "            nn.Dropout(0.5),\n",
    " \n",
    "            nn.Linear(1024,512),\n",
    "            nn.ReLU(inplace=True),\n",
    "            nn.Dropout(0.5),\n",
    "            nn.Linear(512,n_classes)\n",
    "        )\n",
    "\n",
    "    def forward(self,x):\n",
    "        x=self.conv(x)\n",
    "        #这里-1表示一个不确定的数，就是你如果不确定你想要reshape成几行，但是你很肯定要reshape成512列\n",
    "        # 那不确定的地方就可以写成-1\n",
    " \n",
    "        #如果出现x.size(0)表示的是batchsize的值\n",
    "        # x=x.view(x.size(0),-1)\n",
    "        x = x.view(x.size(0), -1)\n",
    "        x=self.fc(x)\n",
    "        return x"
   ],
   "outputs": [],
   "execution_count": 10
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-05-02T08:14:26.739264Z",
     "start_time": "2024-05-02T08:14:23.018586Z"
    }
   },
   "source": [
     "# Training setup: device, model, loss function, optimizer\n",
     "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
     "print(device)\n",
     "net = Vgg16_net().to(device)\n",
     "\n",
     "print(net)\n",
     "# CrossEntropyLoss is the standard loss for multi-class classification\n",
     "criterion = nn.CrossEntropyLoss()\n",
     "optimizer = optim.SGD(net.parameters(), lr=0.00001)\n",
     "# NOTE(review): the saved output of this cell shows a CUDA device-side assert. Such\n",
     "# asserts are often raised asynchronously by an earlier kernel (e.g. a label outside\n",
     "# [0, n_classes) fed to CrossEntropyLoss). Re-run with CUDA_LAUNCH_BLOCKING=1 and\n",
     "# verify every dataset label index is < n_classes -- confirm\n",
     "print('Using: {} device'.format(device))"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cuda\n"
     ]
    },
    {
     "ename": "RuntimeError",
     "evalue": "CUDA error: device-side assert triggered\nCUDA kernel errors might be asynchronously reported at some other API call,so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mRuntimeError\u001B[0m                              Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[11], line 4\u001B[0m\n\u001B[0;32m      2\u001B[0m device \u001B[38;5;241m=\u001B[39m torch\u001B[38;5;241m.\u001B[39mdevice(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mcuda\u001B[39m\u001B[38;5;124m\"\u001B[39m \u001B[38;5;28;01mif\u001B[39;00m torch\u001B[38;5;241m.\u001B[39mcuda\u001B[38;5;241m.\u001B[39mis_available() \u001B[38;5;28;01melse\u001B[39;00m \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mcpu\u001B[39m\u001B[38;5;124m\"\u001B[39m)\n\u001B[0;32m      3\u001B[0m \u001B[38;5;28mprint\u001B[39m(device)\n\u001B[1;32m----> 4\u001B[0m net \u001B[38;5;241m=\u001B[39m \u001B[43mVgg16_net\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mto\u001B[49m\u001B[43m(\u001B[49m\u001B[43mdevice\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m      6\u001B[0m \u001B[38;5;28mprint\u001B[39m(net)\n\u001B[0;32m      7\u001B[0m \u001B[38;5;66;03m# CrossEntropyLoss就是我们需要的损失函数\u001B[39;00m\n",
      "File \u001B[1;32mC:\\ProgramData\\Miniconda3\\envs\\torch\\lib\\site-packages\\torch\\nn\\modules\\module.py:927\u001B[0m, in \u001B[0;36mModule.to\u001B[1;34m(self, *args, **kwargs)\u001B[0m\n\u001B[0;32m    923\u001B[0m         \u001B[38;5;28;01mreturn\u001B[39;00m t\u001B[38;5;241m.\u001B[39mto(device, dtype \u001B[38;5;28;01mif\u001B[39;00m t\u001B[38;5;241m.\u001B[39mis_floating_point() \u001B[38;5;129;01mor\u001B[39;00m t\u001B[38;5;241m.\u001B[39mis_complex() \u001B[38;5;28;01melse\u001B[39;00m \u001B[38;5;28;01mNone\u001B[39;00m,\n\u001B[0;32m    924\u001B[0m                     non_blocking, memory_format\u001B[38;5;241m=\u001B[39mconvert_to_format)\n\u001B[0;32m    925\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m t\u001B[38;5;241m.\u001B[39mto(device, dtype \u001B[38;5;28;01mif\u001B[39;00m t\u001B[38;5;241m.\u001B[39mis_floating_point() \u001B[38;5;129;01mor\u001B[39;00m t\u001B[38;5;241m.\u001B[39mis_complex() \u001B[38;5;28;01melse\u001B[39;00m \u001B[38;5;28;01mNone\u001B[39;00m, non_blocking)\n\u001B[1;32m--> 927\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_apply\u001B[49m\u001B[43m(\u001B[49m\u001B[43mconvert\u001B[49m\u001B[43m)\u001B[49m\n",
      "File \u001B[1;32mC:\\ProgramData\\Miniconda3\\envs\\torch\\lib\\site-packages\\torch\\nn\\modules\\module.py:579\u001B[0m, in \u001B[0;36mModule._apply\u001B[1;34m(self, fn)\u001B[0m\n\u001B[0;32m    577\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21m_apply\u001B[39m(\u001B[38;5;28mself\u001B[39m, fn):\n\u001B[0;32m    578\u001B[0m     \u001B[38;5;28;01mfor\u001B[39;00m module \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mchildren():\n\u001B[1;32m--> 579\u001B[0m         \u001B[43mmodule\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_apply\u001B[49m\u001B[43m(\u001B[49m\u001B[43mfn\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m    581\u001B[0m     \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21mcompute_should_use_set_data\u001B[39m(tensor, tensor_applied):\n\u001B[0;32m    582\u001B[0m         \u001B[38;5;28;01mif\u001B[39;00m torch\u001B[38;5;241m.\u001B[39m_has_compatible_shallow_copy_type(tensor, tensor_applied):\n\u001B[0;32m    583\u001B[0m             \u001B[38;5;66;03m# If the new tensor has compatible tensor type as the existing tensor,\u001B[39;00m\n\u001B[0;32m    584\u001B[0m             \u001B[38;5;66;03m# the current behavior is to change the tensor in-place using `.data =`,\u001B[39;00m\n\u001B[1;32m   (...)\u001B[0m\n\u001B[0;32m    589\u001B[0m             \u001B[38;5;66;03m# global flag to let the user control whether they want the future\u001B[39;00m\n\u001B[0;32m    590\u001B[0m             \u001B[38;5;66;03m# behavior of overwriting the existing tensor or not.\u001B[39;00m\n",
      "File \u001B[1;32mC:\\ProgramData\\Miniconda3\\envs\\torch\\lib\\site-packages\\torch\\nn\\modules\\module.py:579\u001B[0m, in \u001B[0;36mModule._apply\u001B[1;34m(self, fn)\u001B[0m\n\u001B[0;32m    577\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21m_apply\u001B[39m(\u001B[38;5;28mself\u001B[39m, fn):\n\u001B[0;32m    578\u001B[0m     \u001B[38;5;28;01mfor\u001B[39;00m module \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mchildren():\n\u001B[1;32m--> 579\u001B[0m         \u001B[43mmodule\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_apply\u001B[49m\u001B[43m(\u001B[49m\u001B[43mfn\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m    581\u001B[0m     \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21mcompute_should_use_set_data\u001B[39m(tensor, tensor_applied):\n\u001B[0;32m    582\u001B[0m         \u001B[38;5;28;01mif\u001B[39;00m torch\u001B[38;5;241m.\u001B[39m_has_compatible_shallow_copy_type(tensor, tensor_applied):\n\u001B[0;32m    583\u001B[0m             \u001B[38;5;66;03m# If the new tensor has compatible tensor type as the existing tensor,\u001B[39;00m\n\u001B[0;32m    584\u001B[0m             \u001B[38;5;66;03m# the current behavior is to change the tensor in-place using `.data =`,\u001B[39;00m\n\u001B[1;32m   (...)\u001B[0m\n\u001B[0;32m    589\u001B[0m             \u001B[38;5;66;03m# global flag to let the user control whether they want the future\u001B[39;00m\n\u001B[0;32m    590\u001B[0m             \u001B[38;5;66;03m# behavior of overwriting the existing tensor or not.\u001B[39;00m\n",
      "File \u001B[1;32mC:\\ProgramData\\Miniconda3\\envs\\torch\\lib\\site-packages\\torch\\nn\\modules\\module.py:602\u001B[0m, in \u001B[0;36mModule._apply\u001B[1;34m(self, fn)\u001B[0m\n\u001B[0;32m    598\u001B[0m \u001B[38;5;66;03m# Tensors stored in modules are graph leaves, and we don't want to\u001B[39;00m\n\u001B[0;32m    599\u001B[0m \u001B[38;5;66;03m# track autograd history of `param_applied`, so we have to use\u001B[39;00m\n\u001B[0;32m    600\u001B[0m \u001B[38;5;66;03m# `with torch.no_grad():`\u001B[39;00m\n\u001B[0;32m    601\u001B[0m \u001B[38;5;28;01mwith\u001B[39;00m torch\u001B[38;5;241m.\u001B[39mno_grad():\n\u001B[1;32m--> 602\u001B[0m     param_applied \u001B[38;5;241m=\u001B[39m \u001B[43mfn\u001B[49m\u001B[43m(\u001B[49m\u001B[43mparam\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m    603\u001B[0m should_use_set_data \u001B[38;5;241m=\u001B[39m compute_should_use_set_data(param, param_applied)\n\u001B[0;32m    604\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m should_use_set_data:\n",
      "File \u001B[1;32mC:\\ProgramData\\Miniconda3\\envs\\torch\\lib\\site-packages\\torch\\nn\\modules\\module.py:925\u001B[0m, in \u001B[0;36mModule.to.<locals>.convert\u001B[1;34m(t)\u001B[0m\n\u001B[0;32m    922\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m convert_to_format \u001B[38;5;129;01mis\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;28;01mNone\u001B[39;00m \u001B[38;5;129;01mand\u001B[39;00m t\u001B[38;5;241m.\u001B[39mdim() \u001B[38;5;129;01min\u001B[39;00m (\u001B[38;5;241m4\u001B[39m, \u001B[38;5;241m5\u001B[39m):\n\u001B[0;32m    923\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m t\u001B[38;5;241m.\u001B[39mto(device, dtype \u001B[38;5;28;01mif\u001B[39;00m t\u001B[38;5;241m.\u001B[39mis_floating_point() \u001B[38;5;129;01mor\u001B[39;00m t\u001B[38;5;241m.\u001B[39mis_complex() \u001B[38;5;28;01melse\u001B[39;00m \u001B[38;5;28;01mNone\u001B[39;00m,\n\u001B[0;32m    924\u001B[0m                 non_blocking, memory_format\u001B[38;5;241m=\u001B[39mconvert_to_format)\n\u001B[1;32m--> 925\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43mt\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mto\u001B[49m\u001B[43m(\u001B[49m\u001B[43mdevice\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mdtype\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43;01mif\u001B[39;49;00m\u001B[43m \u001B[49m\u001B[43mt\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mis_floating_point\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;129;43;01mor\u001B[39;49;00m\u001B[43m \u001B[49m\u001B[43mt\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mis_complex\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43;01melse\u001B[39;49;00m\u001B[43m \u001B[49m\u001B[38;5;28;43;01mNone\u001B[39;49;00m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mnon_blocking\u001B[49m\u001B[43m)\u001B[49m\n",
      "\u001B[1;31mRuntimeError\u001B[0m: CUDA error: device-side assert triggered\nCUDA kernel errors might be asynchronously reported at some other API call,so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1."
     ]
    }
   ],
   "execution_count": 11
  },
  {
   "cell_type": "code",
   "metadata": {},
   "source": [
    "\n",
    "print(\"Start Training...\")\n",
    "tra_steep_loss_df = pd.DataFrame(columns=('step', 'loss'))  # 创建列表\n",
    "tra_steep_acc_df = pd.DataFrame(columns=('step', 'acc'))  # 创建列表\n",
    "\n",
    "loss100 = 0.0\n",
    "correct = 0\n",
    "total = 0\n",
    "for epoch in range(100):\n",
    "# 我们用一个变量来记录每100个batch的平均loss\n",
    "# 我们的dataloader派上了用场\n",
    "    for i, data in enumerate(train_loader):\n",
    "        inputs, labels = data\n",
    "        inputs, labels = inputs.to(device), labels.to(device) # 注意需要复制到GPU\n",
    "        # print(labels)\n",
    "        point_labels = labels.tolist()    # 把标签记录下来\n",
    "\n",
    "        # 首先要把梯度清零，不然PyTorch每次计算梯度会累加，不清零的话第二次算的梯度等于第一次加第二次的\n",
    "        optimizer.zero_grad()\n",
    "        # 计算前向传播的输出，这里就体现出来动态建图了，你还可以传入其他的参数来改变网络的结构\n",
    "        outputs = net(inputs)   # 模型的输出- 找对应需要的\n",
    "        # print(outputs)\n",
    "        # 根据输出计算loss\n",
    "        loss = criterion(outputs, labels)\n",
    "        # 算完loss之后进行反向梯度传播，这个过程之后梯度会记录在变量中\n",
    "        loss.backward()\n",
    "        # 用计算的梯度去做优化\n",
    "        optimizer.step()\n",
    "        # 可以调用下面的函数来查看参数\n",
    "        # list(（net.parameters)   \n",
    "\n",
    "        loss100 += loss.item()\n",
    "        if i % 100 == 0:\n",
    "            _, pred = torch.max(outputs.data, 1)\n",
    "            total += labels.size(0)\n",
    "            correct += (pred == labels).sum().item()\n",
    "            accuracy = 100 * correct/total\n",
    "            print('correct = %d , total =  %d' %(correct,total))\n",
    "            print('[Epoch %d, Batch %5d] train_loss:%.3f, accuracy:%.3f]' %(epoch + 1, epoch * 800 + i, loss100 / 100,accuracy))\n",
    "            ## 写入列表，用于保存到excel\n",
    "            tra_loss_temp_df = pd.DataFrame([epoch * 800 + i, loss100/100], index=('step', 'loss')).T  # 导入\n",
    "            tra_acc_temp_df = pd.DataFrame([epoch * 800 + i, accuracy], index=('step', 'acc')).T\n",
    "            # tra_steep_loss_df = tra_steep_loss_df.append(tra_loss_temp_df)  # 添加\n",
    "            # tra_steep_acc_df = tra_steep_acc_df.append(tra_acc_temp_df)\n",
    "            tra_steep_loss_df = pd.concat([tra_steep_loss_df,tra_loss_temp_df])  # 添加\n",
    "            tra_steep_acc_df = pd.concat([tra_steep_acc_df,tra_acc_temp_df])\n",
    "            loss100 = 0.0\n",
    "\n",
    "## 写入excel\n",
    "tra_steep_loss_df = tra_steep_loss_df.reset_index(drop=True)  # 更改序号为连续序号\n",
    "tra_steep_acc_df = tra_steep_acc_df.reset_index(drop=True)\n",
    "\n",
    "\n",
    "tra_steep_loss_df.to_csv('1m_CNN_tra_steep_loss.csv')\n",
    "tra_steep_acc_df.to_csv('1m_CNN_tra_steep_acc.csv')\n",
    "print(\"Done Training!\")\n"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "metadata": {},
   "source": [
     "# Save a checkpoint: the whole module object, its state_dict, and the optimizer state.\n",
     "# NOTE(review): pickling the full module ('net') ties the checkpoint to this exact class\n",
     "# definition; the 'dict' entry (state_dict) is the portable part. Assumes ./model/ exists.\n",
     "state = {'net':net,'dict':net.state_dict(), 'optimizer':optimizer.state_dict()}\n",
     "torch.save(state, './model/net10_20240427v2.pth')"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "metadata": {},
   "source": [
    "# 测试数据加载\n",
    "\n",
    "test_path = './dataset3/testdata/'\n",
    "data_transform = transforms.Compose([  # Compose方法是将多种变换组合起来\n",
    "    transforms.RandomResizedCrop(size=(112,112)),  # 依据给定的size随机裁剪 也就是50*50\n",
    "    transforms.ToTensor(),  # 将图片转换为Tensor,会自动将[0,255]归一化至[0,1]\n",
    "    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])  # #标准化至[-1,1]\n",
    "])\n",
    "test_dataset = ImageFolder(test_path, transform=data_transform)\n",
    "test_loader = DataLoader(dataset=test_dataset, batch_size=batch, shuffle=True, num_workers=4)\n",
    "# print(\"------------\")\n",
    "print('test_loader',test_loader)\n",
    "print('(test_dataset',test_dataset)\n",
    "# print('test_dataset.classes',test_dataset.classes)\n",
    "# print('test_dataset.class_to_idx',test_dataset.class_to_idx)"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "metadata": {},
   "source": [
    "# 模型保存后的测试-加载模型\n",
    "\n",
    "checkpoint = torch.load('./model/net10_20240427v2.pth')\n",
    "model_optimizer = optimizer.load_state_dict(checkpoint['optimizer'])\n",
    "model = checkpoint['net']\n",
    "model_dict = model.load_state_dict(checkpoint['dict'])\n",
    "\n",
    "# 读取测试文件标签\n",
    "# labels_loc = pd.read_csv('./data/501/label_501_1m.csv')\n",
    "# length = labels_loc.shape[0]   # 读取目标长度\n",
    "# print(labels_loc)\n",
    "# print(length)\n",
    "# test01 测试集\n",
    "dataiter = iter(test_loader)#\n",
    "\n",
    "# codebook = ['1':2, '2':48, '3:',47]; \n",
    "\n",
    "\n",
    "# 预测正确的数量和总数量- 保存结果\n",
    "correct = 0\n",
    "total = 0\n",
    "p_soft = 0\n",
    "x = []\n",
    "y = []\n",
    "point_lab=[]\n",
    "locx=[]\n",
    "locy=[]\n",
    "# 使用torch.no_grad的话在前向传播中不记录梯度，节省内存\n",
    "test01_class_acc_df = pd.DataFrame(columns=('original', 'predicted'))  # 创建列表\n",
    "test01_steep_acc_df = pd.DataFrame(columns=('step', 'acc'))  # 创建列表\n",
    "\n",
    "# 真正开始预测\n",
    "with torch.no_grad():\n",
    "    for i, data in enumerate(test_loader):\n",
    "        print(i)\n",
    "        images, labels = data   # 测试中的labels对应1-77。  -1-77 ->  1-48\n",
    "        print (\"images = \", images,\"lables = \", labels)\n",
    "        print(\"----------\")\n",
    "        images, labels = images.to(device), labels.to(device)\n",
    "\n",
    "        # 预测\n",
    "        outputs = model(images)\n",
    "        \n",
    "        images.size()\n",
    "        print(outputs)\n",
    "        outputs.size()\n",
    "        print(outputs[1,:])\n",
    "        value_pre = torch.max(outputs,1)\n",
    "        print(value_pre)\n",
    "\n",
    "        \n",
    "\n",
    "        _, predicted = torch.max(outputs,1)\n",
    "\n",
    "        # # --------判断-------  # 输出分类的准确率\n",
    "        # if labels==predicted :\n",
    "        #     correct = correct+1\n",
    "        # else\n",
    "        #     eorrs = eorrs+1\n",
    "\n",
    "        #  #---保存标签  # 目的是为了统计定位的精度\n",
    "        #  a(i) = ['labels','predicted']\n",
    "         \n",
    "#-------------\n",
    "        print(\"predicted = \", predicted)\n",
    "        print(labels)\n"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "metadata": {},
   "source": [
    "\n"
   ],
   "outputs": [],
   "execution_count": null
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "PyTorch",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.18"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
