{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "cd8928d0-33d5-4f69-b8cf-3d31b882cf91",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test set updated successfully!\n"
     ]
    }
   ],
   "source": [
    "# No fog: restore the clear (original) KITTI test images\n",
    "# NOTE: destructive — wipes the current test-image directory before copying\n",
    "update_testsets = \" \\\n",
    "rm ../datasets/kitti/images/test/* &&\\\n",
    "cp /root/autodl-tmp/testing/image_2/* ../datasets/kitti/images/test/ && \\\n",
    "echo 'Test set updated successfully!' \\\n",
    "\" \n",
    "!{update_testsets}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "48ad54aa-102a-4be2-9877-81f5118a9a54",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti.yaml, weights=['runs/train/fog_0_to_fog_1.0_plain/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.845      0.623      0.732      0.454\n",
      "                   Car       2244       8711      0.914      0.763      0.879       0.63\n",
      "                   Van       2244        861      0.824      0.635      0.752      0.512\n",
      "                 Truck       2244        333       0.96      0.733      0.824      0.609\n",
      "                  Tram       2244        138      0.953      0.591      0.793      0.447\n",
      "            Pedestrian       2244       1286      0.739      0.617      0.685      0.358\n",
      "        Person_sitting       2244         89      0.674      0.596       0.66      0.328\n",
      "               Cyclist       2244        496      0.878      0.463      0.584      0.329\n",
      "                  Misc       2244        284      0.815      0.589      0.679      0.418\n",
      "Speed: 0.1ms pre-process, 0.7ms inference, 1.2ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp94\u001b[0m\n",
      "Test set val successfully!\n"
     ]
    }
   ],
   "source": [
    "# Plain fine-tuning baseline (no continual-learning regularizer)\n",
    "model = 'runs/train/fog_0_to_fog_1.0_plain/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "506be9a4-2dca-4139-80d4-3b7dd68f8e59",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "f72a8671-1558-4973-95a3-3310b0cb4cd8",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti.yaml, weights=['runs/train/not_freeze:13_17_20_23_24_10_14_18_21/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test... 2244 images, 0 bac\u001b[0m\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mNew cache created: /root/autodl-tmp/datasets/kitti/labels/test.cache\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.306      0.157      0.152     0.0817\n",
      "                   Car       2244       8711      0.762       0.37      0.443      0.259\n",
      "                   Van       2244        861      0.211      0.251      0.173      0.101\n",
      "                 Truck       2244        333      0.158     0.0691     0.0506     0.0328\n",
      "                  Tram       2244        138     0.0478    0.00725    0.00784    0.00278\n",
      "            Pedestrian       2244       1286      0.469       0.32      0.316      0.145\n",
      "        Person_sitting       2244         89      0.197     0.0196     0.0431     0.0108\n",
      "               Cyclist       2244        496      0.198     0.0927     0.0637     0.0329\n",
      "                  Misc       2244        284      0.403      0.127      0.121     0.0695\n",
      "Speed: 0.0ms pre-process, 0.9ms inference, 0.9ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp121\u001b[0m\n",
      "Test set val successfully!\n"
     ]
    }
   ],
   "source": [
    "# Entire backbone unfrozen (layers 13,17,20,23,24,10,14,18,21 not frozen)\n",
    "model = 'runs/train/not_freeze:13_17_20_23_24_10_14_18_21/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "778d78c1-bc55-4ca7-96b0-7afab0ca08f3",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti.yaml, weights=['runs/train/not_freeze:13_17_20_23_24_10_14_18_21_7_8_9/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.446      0.234      0.246      0.133\n",
      "                   Car       2244       8711      0.705      0.495      0.583      0.345\n",
      "                   Van       2244        861       0.24      0.273      0.218      0.125\n",
      "                 Truck       2244        333      0.764      0.183      0.314      0.217\n",
      "                  Tram       2244        138      0.273     0.0435     0.0621     0.0301\n",
      "            Pedestrian       2244       1286      0.572      0.335      0.354      0.145\n",
      "        Person_sitting       2244         89      0.247      0.247      0.147     0.0504\n",
      "               Cyclist       2244        496      0.409      0.129      0.121     0.0579\n",
      "                  Misc       2244        284      0.359       0.17      0.168     0.0941\n",
      "Speed: 0.1ms pre-process, 0.7ms inference, 1.0ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp122\u001b[0m\n",
      "Test set val successfully!\n"
     ]
    }
   ],
   "source": [
    "# Evaluated on the clear (no-fog) test set\n",
    "model = 'runs/train/not_freeze:13_17_20_23_24_10_14_18_21_7_8_9/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "38a238d9-49f8-4fe0-92fe-562f5d8ed65b",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ea0ea2c7-efb1-459a-8b7b-a8ff4dbe92c8",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f074ab11-8b0e-482b-99bb-d04ccb3355c7",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "83c051a9-90fc-4f35-a617-073c67b351bf",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4692451b-ff31-49ae-9f80-d533401029d1",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "2c201d76-227e-4855-9453-1b52bc9cbae7",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test set updated successfully!\n"
     ]
    }
   ],
   "source": [
    "# Switch to the fogged test set (fog strength 1.0)\n",
    "# NOTE(review): original comment said '0.6 fog', but the path copies fogged_strength1.0\n",
    "# NOTE: destructive — wipes the current test-image directory before copying\n",
    "update_testsets = \" \\\n",
    "rm ../datasets/kitti/images/test/* &&\\\n",
    "cp /root/autodl-tmp/datasets/fogged/fogged_strength1.0/* ../datasets/kitti/images/test/ && \\\n",
    "echo 'Test set updated successfully!' \\\n",
    "\" \n",
    "!{update_testsets}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "6d9e1c2a-d266-4b39-9ada-02a289c0abdc",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti.yaml, weights=['runs/train/fog_0_to_fog_1.0_plain/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test... 2244 images, 0 bac\u001b[0m\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mNew cache created: /root/autodl-tmp/datasets/kitti/labels/test.cache\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.836      0.594      0.695      0.422\n",
      "                   Car       2244       8711      0.914      0.729      0.849      0.604\n",
      "                   Van       2244        861      0.843      0.657      0.743      0.515\n",
      "                 Truck       2244        333      0.855      0.568      0.666      0.404\n",
      "                  Tram       2244        138      0.764      0.588        0.7      0.372\n",
      "            Pedestrian       2244       1286      0.825      0.587      0.682      0.372\n",
      "        Person_sitting       2244         89        0.8      0.495      0.607      0.327\n",
      "               Cyclist       2244        496      0.826       0.49      0.584      0.332\n",
      "                  Misc       2244        284      0.862      0.636      0.727      0.453\n",
      "Speed: 0.0ms pre-process, 1.0ms inference, 0.8ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp95\u001b[0m\n",
      "Test set val successfully!\n"
     ]
    }
   ],
   "source": [
    "# Plain fine-tuning baseline (no continual-learning regularizer)\n",
    "model = 'runs/train/fog_0_to_fog_1.0_plain/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1ab6981e-20ee-4ea1-b5de-e2cce328becd",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "e3de33a5-2636-4ac6-9f71-96f29538db20",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti.yaml, weights=['runs/train/fog_0_to_fog_1.0_ewc_0.001L2/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.877      0.572      0.695       0.42\n",
      "                   Car       2244       8711      0.931      0.698      0.842      0.599\n",
      "                   Van       2244        861      0.891      0.608       0.72      0.502\n",
      "                 Truck       2244        333      0.866      0.556      0.668      0.401\n",
      "                  Tram       2244        138      0.881      0.551      0.689       0.36\n",
      "            Pedestrian       2244       1286      0.866      0.576      0.689      0.366\n",
      "        Person_sitting       2244         89       0.81      0.461      0.618      0.321\n",
      "               Cyclist       2244        496      0.886       0.47      0.591      0.333\n",
      "                  Misc       2244        284      0.884      0.655      0.745       0.48\n",
      "Speed: 0.1ms pre-process, 1.0ms inference, 0.8ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp96\u001b[0m\n",
      "Test set val successfully!\n"
     ]
    }
   ],
   "source": [
    "# EWC (Elastic Weight Consolidation) checkpoint, 0.001 L2 strength\n",
    "model = 'runs/train/fog_0_to_fog_1.0_ewc_0.001L2/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "0bb631bf-7b79-4d5d-b2dd-5b7147876f39",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti.yaml, weights=['runs/train/fog_0_to_fog_1.0_SI_0.005/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.838      0.587      0.689      0.421\n",
      "                   Car       2244       8711      0.916      0.722      0.845        0.6\n",
      "                   Van       2244        861      0.841      0.632      0.723      0.495\n",
      "                 Truck       2244        333      0.871      0.591      0.674      0.415\n",
      "                  Tram       2244        138      0.818      0.565      0.674      0.352\n",
      "            Pedestrian       2244       1286      0.821      0.582      0.676      0.368\n",
      "        Person_sitting       2244         89      0.755      0.472      0.596      0.323\n",
      "               Cyclist       2244        496      0.836      0.488      0.587      0.337\n",
      "                  Misc       2244        284      0.847      0.644      0.736      0.473\n",
      "Speed: 0.1ms pre-process, 1.0ms inference, 0.8ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp97\u001b[0m\n",
      "Test set val successfully!\n"
     ]
    }
   ],
   "source": [
    "# SI (Synaptic Intelligence) checkpoint, strength 0.005\n",
    "model = 'runs/train/fog_0_to_fog_1.0_SI_0.005/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fbc4a913-fa7a-4db3-9520-86bb7cc2b866",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "bb38562d-9e79-4cf6-b496-7d5bac37b4f5",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti.yaml, weights=['runs/train/fog_0_to_fog_1.0_replay_1:9/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.865      0.595      0.708      0.428\n",
      "                   Car       2244       8711       0.92      0.723      0.849      0.603\n",
      "                   Van       2244        861      0.852      0.655      0.733      0.502\n",
      "                 Truck       2244        333      0.864      0.544      0.671      0.391\n",
      "                  Tram       2244        138      0.889      0.609      0.738      0.382\n",
      "            Pedestrian       2244       1286      0.836      0.588      0.682      0.371\n",
      "        Person_sitting       2244         89      0.789      0.517      0.662       0.35\n",
      "               Cyclist       2244        496      0.894      0.484      0.582      0.335\n",
      "                  Misc       2244        284      0.877      0.644      0.747      0.485\n",
      "Speed: 0.0ms pre-process, 1.0ms inference, 0.8ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp98\u001b[0m\n",
      "Test set val successfully!\n"
     ]
    }
   ],
   "source": [
    "# Replay ratio 1:9 (old:new data) checkpoint\n",
    "model = 'runs/train/fog_0_to_fog_1.0_replay_1:9/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6dc02dfe-1e1d-4bf8-802b-0c89e5bda715",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "865ebacc-0d24-4cf6-b18c-1f856dd9eca3",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti.yaml, weights=['runs/train/fog_0_to_fog_1.0_replay_3:7/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.828      0.578      0.683      0.414\n",
      "                   Car       2244       8711      0.905      0.706      0.833      0.589\n",
      "                   Van       2244        861      0.833      0.624       0.71      0.486\n",
      "                 Truck       2244        333       0.85      0.508      0.627      0.389\n",
      "                  Tram       2244        138      0.779      0.616      0.709      0.375\n",
      "            Pedestrian       2244       1286      0.821       0.58       0.67      0.363\n",
      "        Person_sitting       2244         89      0.757      0.524      0.647      0.338\n",
      "               Cyclist       2244        496       0.84      0.482      0.585      0.336\n",
      "                  Misc       2244        284      0.839      0.587      0.684      0.435\n",
      "Speed: 0.0ms pre-process, 0.9ms inference, 0.9ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp99\u001b[0m\n",
      "Test set val successfully!\n"
     ]
    }
   ],
   "source": [
    "# Replay ratio 3:7 checkpoint\n",
    "# NOTE(review): original comment said 'no-fog training set', which does not match\n",
    "# the replay_3:7 weights being evaluated here — confirm which comment is stale\n",
    "model = 'runs/train/fog_0_to_fog_1.0_replay_3:7/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "28b524f4-f995-4836-b2f2-cebea4cb2268",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "c81aa152-f47f-4954-ab2b-810b8e600a43",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti.yaml, weights=['runs/train/fog_0_to_fog_1.0_replay_4:6/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.828      0.568      0.673      0.408\n",
      "                   Car       2244       8711      0.899      0.708      0.829      0.587\n",
      "                   Van       2244        861      0.807      0.632      0.704       0.48\n",
      "                 Truck       2244        333      0.848      0.502      0.643       0.39\n",
      "                  Tram       2244        138      0.819      0.565      0.673       0.35\n",
      "            Pedestrian       2244       1286      0.835       0.58      0.675      0.367\n",
      "        Person_sitting       2244         89        0.8      0.451      0.592      0.314\n",
      "               Cyclist       2244        496      0.796       0.49      0.564      0.323\n",
      "                  Misc       2244        284      0.817       0.62      0.702      0.449\n",
      "Speed: 0.0ms pre-process, 0.8ms inference, 0.9ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp119\u001b[0m\n",
      "Test set val successfully!\n"
     ]
    }
   ],
   "source": [
    "# Replay ratio 4:6 checkpoint\n",
    "model = 'runs/train/fog_0_to_fog_1.0_replay_4:6/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4127ff4c-5522-4352-b6a0-138aaee09757",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "476b500b-e6c4-44fb-88ca-70e0c357543a",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "f4735c0a-154d-4e10-8980-551196ee3195",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti.yaml, weights=['runs/train/not_freeze:13_17_20_23_24_10_14_18_21/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.653      0.273      0.323      0.188\n",
      "                   Car       2244       8711      0.857      0.477      0.606       0.38\n",
      "                   Van       2244        861      0.696      0.285      0.341      0.236\n",
      "                 Truck       2244        333      0.509      0.156       0.17      0.109\n",
      "                  Tram       2244        138      0.522      0.174      0.201      0.113\n",
      "            Pedestrian       2244       1286       0.74      0.379      0.435      0.225\n",
      "        Person_sitting       2244         89       0.66      0.202      0.288       0.13\n",
      "               Cyclist       2244        496      0.702      0.218      0.256      0.145\n",
      "                  Misc       2244        284      0.539      0.292      0.288      0.167\n",
      "Speed: 0.0ms pre-process, 0.9ms inference, 0.9ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp120\u001b[0m\n",
      "Test set val successfully!\n"
     ]
    }
   ],
   "source": [
    "# Entire backbone unfrozen (layers 13,17,20,23,24,10,14,18,21 not frozen)\n",
    "model = 'runs/train/not_freeze:13_17_20_23_24_10_14_18_21/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "abbaa429-c20b-4b3c-beaf-4682f1609662",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "acc8c697-a6ca-477c-bacc-afe177141f9a",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "7fe469d9-fe14-44d6-8f06-32d0f5f312b9",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti.yaml, weights=['runs/train/not_freeze:13_17_20_23_24_10_14_18_21_7_8_9/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test... 2244 images, 0 bac\u001b[0m\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mNew cache created: /root/autodl-tmp/datasets/kitti/labels/test.cache\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.683      0.365       0.42      0.236\n",
      "                   Car       2244       8711      0.822      0.544       0.65      0.404\n",
      "                   Van       2244        861      0.745      0.376       0.44      0.283\n",
      "                 Truck       2244        333      0.579      0.222      0.256      0.146\n",
      "                  Tram       2244        138       0.54      0.357      0.366      0.171\n",
      "            Pedestrian       2244       1286      0.713      0.435      0.493      0.255\n",
      "        Person_sitting       2244         89        0.7      0.315      0.425      0.212\n",
      "               Cyclist       2244        496      0.755       0.31      0.354      0.191\n",
      "                  Misc       2244        284      0.609      0.363      0.373      0.226\n",
      "Speed: 0.1ms pre-process, 1.0ms inference, 0.9ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp123\u001b[0m\n",
      "Test set val successfully!\n"
     ]
    }
   ],
   "source": [
    "# Backbone plus layers 7,8,9 unfrozen (per the checkpoint directory name)\n",
    "model = 'runs/train/not_freeze:13_17_20_23_24_10_14_18_21_7_8_9/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a92ba588-02b1-4692-b755-5f0ae77cff62",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
