{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "1dd8c7a5-cfa9-461e-b0fc-6efe18e7af49",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti_increment.yaml, weights=['runs/train/Lwf_with_head/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 88456e3d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 169 layers, 7166563 parameters, 0 gradients, 64.2 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.684      0.608      0.685      0.366\n",
      "                   car       2244       8711      0.696      0.879      0.875      0.532\n",
      "                   van       2244        861      0.745      0.754      0.808      0.473\n",
      "                 truck       2244        333      0.909      0.772      0.881      0.558\n",
      "                  tram       2244        138      0.933      0.506      0.815       0.48\n",
      "                person       2244       1286      0.449      0.618      0.571      0.269\n",
      "        person_sitting       2244         89      0.309      0.573      0.461      0.165\n",
      "               cyclist       2244        496      0.678      0.446      0.495      0.152\n",
      "                  misc       2244        284      0.755      0.315      0.575      0.298\n",
      "Speed: 0.0ms pre-process, 1.1ms inference, 1.3ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp347\u001b[0m\n",
      "Vis\n"
     ]
    }
   ],
   "source": [
    "# GPU memory usage during validation is excessively high.\n",
    "# Evaluate the LwF (with head) checkpoint on the incremental KITTI test split.\n",
    "model = 'runs/train/Lwf_with_head/weights/last.pt'\n",
    "\n",
    "# '&&' makes the completion marker print only if val.py exits successfully.\n",
    "val_command = f\"python val.py --data data/kitti_increment.yaml --weights {model} --task test && echo 'kitti'\"\n",
    "!{val_command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c9282bd9-ab0a-4814-99d4-7c92c89a0520",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "c1127011-3350-4457-ae23-ca82837d2300",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti_increment.yaml, weights=['runs/train/replay_Lwf_with_head/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 88456e3d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 169 layers, 7166563 parameters, 0 gradients, 64.2 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198       0.83       0.78      0.848      0.545\n",
      "                   car       2244       8711      0.889      0.891      0.946      0.707\n",
      "                   van       2244        861       0.86      0.832      0.898      0.645\n",
      "                 truck       2244        333      0.906      0.955      0.972      0.716\n",
      "                  tram       2244        138      0.854      0.899      0.944      0.601\n",
      "                person       2244       1286       0.85       0.65      0.744      0.391\n",
      "        person_sitting       2244         89       0.61      0.494      0.587      0.306\n",
      "               cyclist       2244        496      0.822      0.734      0.833      0.456\n",
      "                  misc       2244        284      0.853      0.785      0.864      0.535\n",
      "Speed: 0.0ms pre-process, 1.2ms inference, 1.6ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp350\u001b[0m\n",
      "Vis\n"
     ]
    }
   ],
   "source": [
    "# GPU memory usage during validation is excessively high.\n",
    "# Evaluate the replay + LwF (with head) checkpoint on the incremental KITTI test split.\n",
    "model = 'runs/train/replay_Lwf_with_head/weights/last.pt'\n",
    "\n",
    "# '&&' makes the completion marker print only if val.py exits successfully.\n",
    "val_command = f\"python val.py --data data/kitti_increment.yaml --weights {model} --task test && echo 'kitti'\"\n",
    "!{val_command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f005e2bc-d028-4670-bd44-a366f449d87f",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "485399b6-53e1-4368-b453-9cb3cdb2e4c7",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "2a3f0a64-2df9-4035-a622-38381266e416",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_VisDrone_incremental.yaml, weights=['runs/train/replay_Lwf_with_head_vis/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 88456e3d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 169 layers, 7220503 parameters, 0 gradients, 64.6 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-test-dev/labe\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1610      75102      0.337      0.218      0.201      0.106\n",
      "                   car       1610      28074       0.58      0.647       0.62      0.354\n",
      "                   van       1610       5771       0.28      0.262      0.218      0.133\n",
      "                 truck       1610       2659       0.27      0.241      0.168     0.0866\n",
      "                person       1610       6376     0.0323       0.13     0.0221    0.00682\n",
      "               bicycle       1610       1302      0.274       0.03     0.0433     0.0144\n",
      "                   bus       1610       2940      0.629       0.42      0.441      0.265\n",
      "             motorbike       1610       5845      0.398      0.171      0.163     0.0582\n",
      "            pedestrian       1610      21006      0.453      0.188      0.228     0.0872\n",
      "              tricycle       1610        530      0.178     0.0604      0.044     0.0222\n",
      "       awning-tricycle       1610        599      0.277     0.0317     0.0607     0.0304\n",
      "Speed: 0.1ms pre-process, 3.8ms inference, 45.5ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp358\u001b[0m\n",
      "Vis\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/openimages.yaml, weights=['runs/train/replay_Lwf_with_head_vis/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 88456e3d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 169 layers, 7220503 parameters, 0 gradients, 64.6 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/test.cache... 600 ima\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        600       1621      0.849     0.0365     0.0369      0.025\n",
      "                   car        600        113      0.246     0.0973     0.0734     0.0407\n",
      "                   van        600          6          0          0   0.000205   2.05e-05\n",
      "                 truck        600         17          1          0     0.0041    0.00256\n",
      "                person        600       1131      0.157     0.0221      0.034     0.0125\n",
      "               bicycle        600         43          1          0    0.00344   0.000695\n",
      "                  bird        600         61      0.492     0.0164     0.0252    0.00761\n",
      "                  boat        600         82          1          0     0.0019    0.00125\n",
      "                bottle        600          1          1          0          0          0\n",
      "                   bus        600          3       0.79      0.667      0.665      0.482\n",
      "                   cat        600          5          1          0          0          0\n",
      "                 chair        600         12          1          0          0          0\n",
      "                   dog        600         25          1          0    0.00144   0.000615\n",
      "                 horse        600         37          1          0   0.000619   0.000102\n",
      "                 sheep        600          8          1          0   0.000348   0.000139\n",
      "                 train        600          2          1          0          0          0\n",
      "             billboard        600          3          1          0   0.000523   0.000314\n",
      "                rabbit        600          1          1          0          0          0\n",
      "                monkey        600         16          1          0          0          0\n",
      "                   pig        600          7          1          0          0          0\n",
      "                   toy        600         42          1          0    0.00091   0.000317\n",
      "         traffic light        600          5          1          0          0          0\n",
      "          traffic sign        600          1          1          0          0          0\n",
      "Speed: 0.1ms pre-process, 3.1ms inference, 5.0ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp359\u001b[0m\n",
      "openimages\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_VOC.yaml, weights=['runs/train/replay_Lwf_with_head_vis/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 88456e3d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 169 layers, 7220503 parameters, 0 gradients, 64.6 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 image\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.526     0.0316     0.0242     0.0117\n",
      "                   car       4952       1201      0.326      0.366       0.29      0.155\n",
      "                person       4952       4528      0.299      0.131      0.107     0.0389\n",
      "             aeroplane       4952        285          1          0   0.000701     0.0002\n",
      "               bicycle       4952        337      0.129     0.0119    0.00814    0.00294\n",
      "                  bird       4952        459      0.138     0.0218     0.0102    0.00518\n",
      "                  boat       4952        263      0.207    0.00236    0.00185   0.000548\n",
      "                bottle       4952        469          0          0     0.0044    0.00121\n",
      "                   bus       4952        213      0.119     0.0704     0.0249     0.0141\n",
      "                   cat       4952        358          1          0   0.000443   0.000191\n",
      "                 chair       4952        756      0.124    0.00529     0.0092    0.00386\n",
      "                   cow       4952        244          1          0   0.000936   0.000477\n",
      "           diningtable       4952        206          1          0          0          0\n",
      "                   dog       4952        489          1          0    0.00113   0.000362\n",
      "                 horse       4952        348          1          0   0.000859   0.000349\n",
      "             motorbike       4952        325     0.0843     0.0215    0.00914    0.00279\n",
      "           pottedplant       4952        480     0.0863    0.00208    0.00107   0.000506\n",
      "                 sheep       4952        242          1          0    0.00133   0.000714\n",
      "                  sofa       4952        239          1          0   0.000454   0.000225\n",
      "                 train       4952        282          0          0   0.000386   0.000167\n",
      "             tvmonitor       4952        308          1          0     0.0121    0.00692\n",
      "Speed: 0.1ms pre-process, 1.9ms inference, 2.3ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp360\u001b[0m\n",
      "Voc\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_kitti.yaml, weights=['runs/train/replay_Lwf_with_head_vis/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 88456e3d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 169 layers, 7220503 parameters, 0 gradients, 64.6 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.702      0.173      0.165     0.0809\n",
      "                   car       2244       8711      0.586      0.739      0.718      0.377\n",
      "                   van       2244        861      0.149     0.0604     0.0857      0.045\n",
      "                 truck       2244        333      0.363        0.3      0.231      0.136\n",
      "                  tram       2244        138          1          0     0.0017   0.000964\n",
      "                person       2244       1286      0.521      0.281      0.261      0.082\n",
      "        person_sitting       2244         89          1          0   0.000677   0.000241\n",
      "               cyclist       2244        496          1          0    0.00377    0.00106\n",
      "                  misc       2244        284          1          0     0.0142    0.00455\n",
      "Speed: 0.0ms pre-process, 1.1ms inference, 4.0ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp361\u001b[0m\n",
      "kitti\n"
     ]
    }
   ],
   "source": [
    "# 1e-4 1e-3\n",
    "# Evaluate the replay + LwF (with head) VisDrone checkpoint on all four benchmarks.\n",
    "model = 'runs/train/replay_Lwf_with_head_vis/weights/last.pt'\n",
    "\n",
    "# (data yaml, completion marker) pairs -- replaces four copy-pasted command blocks.\n",
    "datasets = [\n",
    "    ('data/val_VisDrone_incremental.yaml', 'Vis'),\n",
    "    ('data/openimages.yaml', 'openimages'),\n",
    "    ('data/val_VOC.yaml', 'Voc'),\n",
    "    ('data/val_kitti.yaml', 'kitti'),\n",
    "]\n",
    "for data_yaml, marker in datasets:\n",
    "    # '&&' makes the marker print only if val.py exits successfully.\n",
    "    val_command = f\"python val.py --data {data_yaml} --weights {model} --task test && echo '{marker}'\"\n",
    "    !{val_command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7f48e3f1-5c7b-43f3-81cb-092bf2b08eb6",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b9331110-5bed-4857-ba7d-b17fbaef16b1",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "62eb5668-02a8-4bfe-a582-3cf3e4c89ced",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7ff96e32-a4d2-47bc-af01-b877ac593f57",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1d4d6118-4de6-44f0-92c5-4c24881ff19f",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "21b8a8d6-2434-4345-9571-da80b40c0c53",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "edeb320b-c65b-40a4-961e-a4ea8a65b5c4",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "15d90ef1-a664-4c5f-ae51-dfd8f8debb80",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "61072486-eb48-4add-84df-bab7fba6480d",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2fc77426-f161-4b3a-a05c-4008d3b3f420",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "8128e31d-c71d-41c4-b8c3-5eedd3464f6c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_kitti.yaml, weights=['runs/train/k_v_o_replay_DER/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 7330b3e4 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 599 layers, 21501765 parameters, 0 gradients, 99.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.821      0.724      0.797      0.501\n",
      "                   car       2244       8711      0.909       0.87      0.938      0.692\n",
      "                   van       2244        861      0.894      0.763       0.85      0.594\n",
      "                 truck       2244        333      0.905      0.877      0.935      0.686\n",
      "                  tram       2244        138      0.813      0.862      0.907      0.551\n",
      "                person       2244       1286      0.863      0.616      0.725      0.388\n",
      "        person_sitting       2244         89      0.498      0.449      0.506      0.235\n",
      "               cyclist       2244        496       0.82      0.699      0.784      0.428\n",
      "                  misc       2244        284      0.869      0.653      0.733      0.433\n",
      "Speed: 0.0ms pre-process, 2.6ms inference, 1.8ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp280\u001b[0m\n",
      "Vis\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the k_v_o replay DER checkpoint on the KITTI test split.\n",
    "model = 'runs/train/k_v_o_replay_DER/weights/last.pt'\n",
    "\n",
    "# '&&' makes the completion marker print only if val.py exits successfully.\n",
    "val_command = f\"python val.py --data data/val_kitti.yaml --weights {model} --task test && echo 'kitti'\"\n",
    "!{val_command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4d0731a8-1985-4747-8b57-fc8c6767e7ba",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d6fe059f-2dcc-49b2-be70-2b56c853753b",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "47e66c08-07d0-4bfc-9ef8-cee4bc91b94f",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "42f514a7-ac7b-46b0-b5ed-cd5bfcf8ce63",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "775a3e4b-e598-4bfe-acf5-418ddcbc24f8",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f8f62c38-222a-440e-8838-3939ea696376",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "868240d4-f2cc-4581-ae4b-930e155fa1a2",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_PODNet: \u001b[0mweights=./runs/train/fog_02/weights/last.pt, cfg=models/yolov5s_VOCKITTI.yaml, data=data/temp_test.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=50, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=False, Lwf_lambda=[], Lwf_temperature=1.0, PODNet_enable=True, Distillation_layers=[17, 20, 23], POD_lambda=100.0, Old_models=['./runs/train/fog_02/weights/last.pt'], DER_enable=False, DER_old_model=[]\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2895 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 7330b3e4 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/38ff51e41c994d0493dbfd9f39cea89a\u001b[0m\n",
      "\n",
      "extractors长度： 0\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83613  models.yolo_PODNet.Detect               [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 217 layers, 7089757 parameters, 7089757 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/355 items from runs/train/fog_02/weights/last.pt\n",
      "Overriding model.yaml nc=26 with nc=8\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35067  models.yolo_PODNet.Detect               [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 217 layers, 7041211 parameters, 7041211 gradients, 16.0 GFLOPs\n",
      "\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 66 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/train2007.cache... 2501 ima\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/val.cache... 1048 images, 0 b\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.08 anchors/target, 1.000 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/exp81/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/exp81\u001b[0m\n",
      "Starting training for 50 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/49      3.51G    0.08326    0.04905    0.07391         32        640: 1\n",
      "tensor([1.02603], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00071, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675     0.0498      0.184     0.0506     0.0256\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/49      3.51G    0.06039    0.04324    0.05847         14        640: 1\n",
      "tensor([0.61830], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00105, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.836      0.119      0.107     0.0529\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/49      3.51G    0.05529    0.03884    0.05258         33        640: 1\n",
      "tensor([0.92532], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00118, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.904      0.116      0.144     0.0656\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/49      3.51G    0.05059    0.03747    0.04506         29        640: 1\n",
      "tensor([0.77489], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00160, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.907      0.121      0.142     0.0676\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/49      3.51G    0.04783    0.03671    0.03929         21        640: 1\n",
      "tensor([0.88484], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00170, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.905      0.112      0.144     0.0686\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/49      3.51G    0.04577    0.03716    0.03401         21        640: 1\n",
      "tensor([0.76869], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00155, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.914      0.122      0.148     0.0689\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/49      3.51G    0.04546    0.03635    0.03122         37        640: 1\n",
      "tensor([0.82109], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00151, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.927      0.134      0.161     0.0794\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/49      3.51G    0.04421    0.03528    0.02836         24        640: 1\n",
      "tensor([0.77286], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00162, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.924      0.122      0.153     0.0797\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/49      3.51G      0.043    0.03483     0.0258         13        640: 1\n",
      "tensor([0.65241], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00164, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.911      0.124      0.156     0.0819\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/49      3.51G    0.04216    0.03432    0.02471         22        640: 1\n",
      "tensor([0.49841], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00154, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.926      0.124      0.158     0.0827\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/49      3.51G    0.04125    0.03352    0.02324         32        640: 1\n",
      "tensor([0.66907], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00138, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.924      0.131      0.168      0.085\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/49      3.51G    0.04047    0.03365    0.02245         24        640: 1\n",
      "tensor([0.65675], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00156, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.921       0.13      0.165     0.0853\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/49      3.51G     0.0402    0.03293    0.02096         33        640: 1\n",
      "tensor([0.66789], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00131, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.927      0.128      0.161     0.0821\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/49      3.51G    0.03935    0.03308    0.01889         15        640: 1\n",
      "tensor([0.45213], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00155, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.912      0.129       0.16     0.0802\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/49      3.51G    0.03866    0.03255    0.01863         15        640: 1\n",
      "tensor([0.49062], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00147, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.923       0.13      0.158     0.0789\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/49      3.51G    0.03786    0.03203     0.0178         27        640: 1\n",
      "tensor([0.50993], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00129, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.917      0.126      0.161      0.082\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/49      3.51G     0.0377    0.03207    0.01706         16        640: 1\n",
      "tensor([0.53912], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00130, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.921      0.133      0.164     0.0792\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/49      3.51G    0.03752    0.03161    0.01596         31        640: 1\n",
      "tensor([0.62562], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00170, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.926       0.14      0.159     0.0782\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/49      3.51G      0.037    0.03141    0.01607         24        640: 1\n",
      "tensor([0.53815], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00134, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.919      0.129      0.154     0.0792\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/49      3.51G     0.0362    0.03077    0.01466         28        640: 1\n",
      "tensor([0.81210], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00156, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.92      0.136      0.163     0.0824\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/49      3.51G    0.03635    0.03052    0.01444          9        640: 1\n",
      "tensor([0.73844], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00142, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.925      0.128      0.154     0.0767\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/49      3.51G     0.0353    0.03047    0.01348         37        640: 1\n",
      "tensor([0.60117], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00153, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.915      0.136      0.159     0.0786\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/49      3.51G    0.03509    0.02987    0.01371         26        640: 1\n",
      "tensor([0.54914], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00123, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.916      0.137      0.159     0.0799\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/49      3.51G    0.03448    0.03005    0.01342         25        640: 1\n",
      "tensor([0.48240], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00149, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.926      0.133      0.164     0.0821\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/49      3.51G    0.03411    0.02943    0.01298         66        640:  "
     ]
    }
   ],
   "source": [
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_PODNet.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_VOCKITTI.yaml \\\n",
    "--data data/temp_test.yaml \\\n",
    "--epochs 50 \\\n",
    "--weights ./runs/train/fog_02/weights/last.pt \\\n",
    "--PODNet_enable \\\n",
    "--Distillation_layers 17 20 23 \\\n",
    "--POD_lambda 1e2 \\\n",
    "--Old_models \\\n",
    "   ./runs/train/fog_02/weights/last.pt \\\n",
    "\n",
    "\"\"\"\n",
    "!{command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f88a19ff-9394-4db1-9c3b-e0b49ce3c393",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "7cc2b14b-a8ac-4846-bd51-a8275250fcf5",
   "metadata": {},
   "outputs": [],
   "source": [
    "pod_layers = ' '.join([str(i) for i in range(24)])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "b2af322e-d734-48b3-b081-c5f4652dc6c4",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_PODNet: \u001b[0mweights=./runs/train/fog_02/weights/last.pt, cfg=models/yolov5s_VOCKITTI.yaml, data=data/temp_test.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=50, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=False, Lwf_lambda=[], Lwf_temperature=1.0, PODNet_enable=True, Distillation_layers=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23], POD_lambda=10.0, Old_models=['./runs/train/fog_02/weights/last.pt'], DER_enable=False, DER_old_model=[]\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2895 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 7330b3e4 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/a5b8606a1e34418f8a646aa1704c3af8\u001b[0m\n",
      "\n",
      "extractors长度： 0\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83613  models.yolo_PODNet.Detect               [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 217 layers, 7089757 parameters, 7089757 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/355 items from runs/train/fog_02/weights/last.pt\n",
      "Overriding model.yaml nc=26 with nc=8\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35067  models.yolo_PODNet.Detect               [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 217 layers, 7041211 parameters, 7041211 gradients, 16.0 GFLOPs\n",
      "\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 66 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/train2007.cache... 2501 ima\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/val.cache... 1048 images, 0 b\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.08 anchors/target, 1.000 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/exp87/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/exp87\u001b[0m\n",
      "Starting training for 50 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/49      6.05G    0.08284    0.04907     0.0737         32        640: 1\n",
      "tensor([0.99605], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00436, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675     0.0509       0.17     0.0485     0.0265\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/49      6.05G    0.06019    0.04302    0.05822         14        640: 1\n",
      "tensor([0.58079], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00748, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.854      0.116      0.115     0.0569\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/49      6.06G    0.05511    0.03871     0.0523         33        640: 1\n",
      "tensor([0.89934], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.01022, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.906      0.117      0.143     0.0637\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/49      6.06G    0.05041    0.03755    0.04515         29        640: 1\n",
      "tensor([0.75611], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.01351, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.899      0.114      0.149     0.0655\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/49      6.06G    0.04795    0.03672    0.03946         21        640: 1\n",
      "tensor([0.83996], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.01513, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.911      0.128       0.16     0.0809\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/49      6.06G    0.04641    0.03702    0.03357         21        640: 1\n",
      "tensor([0.76705], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.01445, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.922      0.124      0.162      0.074\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/49      6.06G    0.04488    0.03608    0.03065         37        640: 1\n",
      "tensor([0.79429], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.01388, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.921      0.137      0.175     0.0857\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/49      6.06G    0.04352    0.03526    0.02783         24        640: 1\n",
      "tensor([0.74873], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.01555, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.926      0.123       0.16     0.0829\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/49      6.06G    0.04243    0.03479    0.02516         13        640: 1\n",
      "tensor([0.66402], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.01626, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.919      0.124      0.156     0.0782\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/49      6.06G    0.04185    0.03416    0.02439         22        640: 1\n",
      "tensor([0.52188], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.01474, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.911      0.129      0.164     0.0734\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/49      6.06G    0.04102    0.03314    0.02251         32        640: 1\n",
      "tensor([0.65024], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.01311, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.92      0.134      0.169     0.0845\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/49      6.06G     0.0403     0.0333    0.02139         24        640: 1\n",
      "tensor([0.60865], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.01477, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.917      0.136       0.17     0.0813\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/49      6.06G    0.04019    0.03286    0.02118         71        640:  "
     ]
    }
   ],
   "source": [
     "# Same PODNet incremental run, but distilling over ALL 24 layers\n",
     "# (pod_layers is interpolated into the f-string below) and with a\n",
     "# weaker POD_lambda (1e1) than the head-only run (1e2).\n",
     "command = f\"\"\"\n",
     "env COMET_LOG_PER_CLASS_METRICS=true python train_PODNet.py \\\n",
     "--img 640 \\\n",
     "--bbox_interval 1 \\\n",
     "--cfg models/yolov5s_VOCKITTI.yaml \\\n",
     "--data data/temp_test.yaml \\\n",
     "--epochs 50 \\\n",
     "--weights ./runs/train/fog_02/weights/last.pt \\\n",
     "--PODNet_enable \\\n",
     "--Distillation_layers {pod_layers} \\\n",
     "--POD_lambda 1e1 \\\n",
     "--Old_models \\\n",
     "   ./runs/train/fog_02/weights/last.pt \\\n",
     "\n",
     "\"\"\"\n",
     "!{command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "67ae3607-522b-481c-8873-2daed6cc89eb",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "309ecc68-d386-4e73-9aac-c4d8f6c3f8b3",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "bae18d90-83b9-4253-a0c4-c1df10259ccf",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti_increment.yaml, weights=['runs/train/POD_layers_1_3_5_7_9_13_17_20_23/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 7330b3e4 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 160 layers, 7080253 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.177      0.149      0.154     0.0817\n",
      "                   car       2244       8711      0.808      0.696      0.757       0.43\n",
      "                   van       2244        861          0          0          0          0\n",
      "                 truck       2244        333          0          0          0          0\n",
      "                  tram       2244        138          0          0          0          0\n",
      "                person       2244       1286      0.606      0.499      0.475      0.223\n",
      "        person_sitting       2244         89          0          0          0          0\n",
      "               cyclist       2244        496          0          0          0          0\n",
      "                  misc       2244        284          0          0          0          0\n",
      "Speed: 0.0ms pre-process, 1.4ms inference, 3.3ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp309\u001b[0m\n",
      "Vis\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the distilled model on the KITTI-increment test split, then print a marker.\n",
    "pod_layers = '1 3 5 7 9 13 17 20 23'  # must match the training run's --Distillation_layers\n",
    "pod_name = pod_layers.replace(' ', '_')  # run directory suffix, e.g. POD_layers_1_3_..._23\n",
    "\n",
    "model = f'runs/train/POD_layers_{pod_name}/weights/last.pt'\n",
    "\n",
    "# Build the command from explicit tokens instead of backslash-continued string\n",
    "# literals: the old form produced '&&echo' with no space (worked only by accident\n",
    "# of shell tokenization) and carried stray leading/trailing spaces.\n",
    "val_command = ' '.join([\n",
    "    'python val.py',\n",
    "    '--data data/kitti_increment.yaml',\n",
    "    f'--weights {model}',\n",
    "    '--task test',\n",
    "    '&&',  # only echo the marker if validation succeeded\n",
    "    \"echo 'Vis'\",\n",
    "])\n",
    "!{val_command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6708669b-b927-4966-8c98-eb97cae5dd5c",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "882d6898-17a2-4438-8792-d86b4d44703f",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "83ae6542-fde5-4b74-859d-1b9f4e679e7c",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ac1a22ff-c241-4e2f-846d-ed4dd4961070",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_SI: \u001b[0mweights=yolov5s.pt, cfg=models/yolov5s_kitti.yaml, data=data/kitti.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=100, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=fog_0_time, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0\n",
      "Command 'git fetch ultralytics' timed out after 5 seconds\n",
      "YOLOv5 🚀 7330b3e4 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/95769a922dc34a3a892ca38df223cbd9\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35067  models.yolo.Detect                      [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_kitti summary: 217 layers, 7041211 parameters, 7041211 gradients, 16.0 GFLOPs\n",
      "\n",
      "Transferred 342/355 items from yolov5s.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 66 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/train.cache... 4189 image\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/val.cache... 1048 images, 0\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.81 anchors/target, 0.999 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/fog_0_time/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/fog_0_time\u001b[0m\n",
      "Starting training for 100 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/99      3.65G    0.08072    0.04759    0.03296        128        640: 1\n",
      "tensor([1.75600], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.734      0.129      0.124     0.0419\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/99      3.65G    0.06457       0.04    0.02248        133        640: 1\n",
      "tensor([1.65806], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.204       0.35      0.215     0.0908\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/99      3.65G     0.0613    0.03903    0.02026        218        640:  fatal: unable to access 'https://github.com/ultralytics/yolov5/': Failed to connect to github.com port 443 after 129247 ms: Connection timed out\n",
      "       2/99      3.65G    0.05972    0.03868    0.01937        131        640: 1\n",
      "tensor([1.42281], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.536      0.337      0.306      0.138\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/99      3.65G    0.05331    0.03761    0.01617        108        640: 1\n",
      "tensor([1.15876], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.671      0.424       0.44      0.227\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/99      3.65G    0.04864    0.03638    0.01399        156        640: 1\n",
      "tensor([1.21092], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.709      0.473      0.524      0.264\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/99      3.65G    0.04601     0.0358    0.01273        123        640: 1\n",
      "tensor([1.12633], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.579      0.589      0.581      0.298\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/99      3.65G    0.04408     0.0351    0.01141        174        640: 1\n",
      "tensor([1.29816], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.698        0.6       0.62      0.328\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/99      3.65G     0.0432    0.03418    0.01049        166        640: 1\n",
      "tensor([1.34341], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.643      0.612      0.652      0.352\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/99      3.65G    0.04215    0.03428   0.009365        152        640: 1\n",
      "tensor([1.09411], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.751      0.619      0.698       0.39\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/99      3.65G    0.04123    0.03388   0.008728        136        640: 1\n",
      "tensor([1.01852], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.726      0.665      0.721      0.396\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/99      3.65G     0.0405    0.03344    0.00792        134        640: 1\n",
      "tensor([1.05314], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.767      0.669      0.742      0.424\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/99      3.65G    0.03981    0.03271   0.007507        182        640: 1\n",
      "tensor([1.09295], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.803      0.668       0.74      0.439\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/99      3.65G    0.03912    0.03299   0.007183        128        640: 1\n",
      "tensor([0.91446], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.769      0.685      0.758       0.44\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/99      3.65G    0.03848     0.0321   0.006699        112        640: 1\n",
      "tensor([0.96948], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.854      0.688      0.774       0.45\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/99      3.65G    0.03776    0.03205   0.006439        151        640: 1\n",
      "tensor([0.94161], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.851      0.684      0.773      0.461\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/99      3.65G    0.03732    0.03199   0.006223        132        640: 1\n",
      "tensor([0.97841], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.852      0.698      0.785      0.478\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/99      3.65G    0.03695    0.03138   0.006012        131        640: 1\n",
      "tensor([0.88883], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.798      0.727      0.788      0.468\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/99      3.65G    0.03644    0.03126    0.00583        159        640: 1\n",
      "tensor([1.05659], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.817      0.737      0.804      0.481\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/99      3.65G    0.03595    0.03092   0.005616        125        640: 1\n",
      "tensor([0.84348], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.818      0.709      0.786      0.478\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/99      3.65G    0.03575    0.03108   0.005572         88        640: 1\n",
      "tensor([0.74057], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.848      0.748      0.815      0.507\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/99      3.65G     0.0354    0.03007   0.005236        137        640: 1\n",
      "tensor([1.05580], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.831      0.753       0.82      0.501\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/99      3.65G    0.03491    0.03062   0.005055        166        640: 1\n",
      "tensor([0.94961], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.831      0.752      0.812      0.505\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/99      3.65G    0.03422    0.03006   0.004975        161        640: 1\n",
      "tensor([0.97848], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.838      0.748       0.83      0.517\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/99      3.65G    0.03455    0.02969   0.004878        118        640: 1\n",
      "tensor([0.80235], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.886      0.735      0.834      0.522\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/99      3.65G    0.03398    0.02962   0.004707        151        640: 1\n",
      "tensor([0.92674], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.869      0.751       0.83      0.526\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/99      3.65G    0.03386    0.02971    0.00453        133        640: 1\n",
      "tensor([0.87556], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.865      0.787      0.846      0.519\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/99      3.65G    0.03351    0.02934   0.004368        154        640: 1\n",
      "tensor([0.97849], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.874       0.77      0.837      0.528\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/99      3.65G    0.03322    0.02931   0.004458        122        640: 1\n",
      "tensor([0.80563], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.884      0.761      0.836      0.532\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/99      3.65G    0.03283    0.02908   0.004303        123        640: 1\n",
      "tensor([0.73215], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.897      0.762      0.852      0.547\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/99      3.65G    0.03309    0.02876   0.004284        127        640: 1\n",
      "tensor([0.77846], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.846      0.784      0.838      0.537\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/99      3.65G    0.03233    0.02775   0.004116        127        640: 1\n",
      "tensor([0.74355], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.886      0.777      0.853      0.551\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/99      3.65G    0.03234    0.02826   0.004087        122        640: 1\n",
      "tensor([0.83938], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.857      0.795      0.861      0.554\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/99      3.65G    0.03221    0.02851   0.004088        146        640: 1\n",
      "tensor([0.90675], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.887      0.745      0.843      0.547\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/99      3.65G    0.03181    0.02782   0.003938        202        640: 1\n",
      "tensor([0.95553], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.89      0.771      0.853      0.553\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/99      3.65G    0.03183    0.02765   0.003816         94        640: 1\n",
      "tensor([0.67400], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.883      0.772      0.855      0.567\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/99      3.65G     0.0316    0.02764   0.003761        152        640: 1\n",
      "tensor([0.89800], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.875      0.803      0.858      0.566\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/99      3.65G    0.03133    0.02744   0.003756        123        640: 1\n",
      "tensor([0.74934], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.898       0.78      0.868      0.558\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/99      3.65G    0.03142    0.02757   0.003735        162        640: 1\n",
      "tensor([0.80210], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.888        0.8      0.858      0.569\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/99      3.65G    0.03102    0.02765   0.003638        161        640: 1\n",
      "tensor([0.85241], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.874      0.815      0.869       0.57\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/99      3.65G    0.03094    0.02754   0.003608        122        640: 1\n",
      "tensor([0.72625], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.872      0.803       0.86      0.565\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/99      3.65G    0.03072    0.02703   0.003622        126        640: 1\n",
      "tensor([0.73243], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675        0.9      0.786      0.865      0.573\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/99      3.65G    0.03042    0.02696   0.003513         90        640: 1\n",
      "tensor([0.67693], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.91      0.789      0.873      0.573\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/99      3.65G    0.03027     0.0268   0.003337        118        640: 1\n",
      "tensor([0.77790], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.884      0.817      0.877      0.576\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/99      3.65G    0.03025    0.02679   0.003301        157        640: 1\n",
      "tensor([0.80486], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.921      0.784      0.869      0.582\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/99      3.65G    0.03017     0.0268   0.003329        104        640: 1\n",
      "tensor([0.58417], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.869      0.807      0.864      0.587\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/99      3.65G    0.03004    0.02709   0.003234        157        640: 1\n",
      "tensor([0.77997], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.905      0.799      0.869      0.586\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/99      3.65G    0.03001    0.02643   0.003222        108        640: 1\n",
      "tensor([0.59330], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.871      0.819      0.875      0.586\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/99      3.65G    0.02973    0.02634   0.003287        159        640: 1\n",
      "tensor([0.82862], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.907      0.804      0.861      0.571\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/99      3.65G    0.02938    0.02615   0.003199        118        640: 1\n",
      "tensor([0.71773], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.897      0.791      0.863      0.597\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/99      3.65G    0.02936    0.02654   0.003175        176        640: 1\n",
      "tensor([0.89009], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.909      0.782      0.866      0.599\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      50/99      3.65G    0.02918    0.02597   0.003111        130        640: 1\n",
      "tensor([0.69951], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.895       0.81       0.87      0.593\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      51/99      3.65G    0.02893    0.02618   0.003081        178        640: 1\n",
      "tensor([0.90317], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.875      0.806      0.861      0.592\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      52/99      3.65G    0.02883    0.02573   0.003088        148        640: 1\n",
      "tensor([0.75652], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.877      0.807      0.868      0.592\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      53/99      3.65G    0.02873    0.02562   0.003017        115        640: 1\n",
      "tensor([0.67199], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.902      0.804      0.874      0.599\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      54/99      3.65G    0.02863    0.02555   0.002981        124        640: 1\n",
      "tensor([0.67098], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.919      0.816      0.881      0.603\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      55/99      3.65G    0.02847    0.02507   0.002964        163        640: 1\n",
      "tensor([0.75758], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.887       0.82      0.875        0.6\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      56/99      3.65G    0.02812    0.02553   0.002928        200        640: 1\n",
      "tensor([0.83031], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.914      0.803      0.875      0.596\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      57/99      3.65G    0.02827    0.02545   0.003022        141        640: 1\n",
      "tensor([0.71872], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.888      0.823      0.877        0.6\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      58/99      3.65G    0.02822    0.02538   0.003048        146        640: 1\n",
      "tensor([0.71913], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.896      0.814      0.879      0.604\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      59/99      3.65G    0.02794    0.02513   0.002894        168        640: 1\n",
      "tensor([0.73726], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.891      0.829      0.875      0.604\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      60/99      3.65G    0.02783    0.02479   0.002749        175        640: 1\n",
      "tensor([0.78288], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.931      0.817      0.891      0.615\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      61/99      3.65G    0.02752    0.02494   0.002823        139        640: 1\n",
      "tensor([0.79616], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.932      0.799      0.884      0.614\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      62/99      3.65G    0.02734    0.02406   0.002757        117        640: 1\n",
      "tensor([0.64282], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.891       0.82      0.882      0.611\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      63/99      3.65G    0.02757    0.02453   0.002707        129        640: 1\n",
      "tensor([0.67578], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.909      0.803      0.876      0.611\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      64/99      3.65G     0.0272    0.02457   0.002829        109        640: 1\n",
      "tensor([0.63226], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.918      0.814      0.891      0.609\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      65/99      3.65G    0.02724    0.02473   0.002661        154        640: 1\n",
      "tensor([0.79511], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.928      0.812      0.882       0.62\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      66/99      3.65G    0.02693    0.02447   0.002679        119        640: 1\n",
      "tensor([0.67969], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.905      0.811      0.883      0.614\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      67/99      3.65G    0.02695    0.02389   0.002731        153        640: 1\n",
      "tensor([0.72823], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.923      0.812      0.885      0.623\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      68/99      3.65G     0.0268    0.02431    0.00268        116        640: 1\n",
      "tensor([0.64042], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.929      0.822      0.878      0.615\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      69/99      3.65G    0.02647    0.02364   0.002644        141        640: 1\n",
      "tensor([0.72498], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.934      0.815      0.881      0.619\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      70/99      3.65G    0.02654    0.02393   0.002527        175        640: 1\n",
      "tensor([0.84789], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.928      0.817      0.876      0.618\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      71/99      3.65G     0.0266    0.02394   0.002583        161        640: 1\n",
      "tensor([0.72434], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.918       0.82      0.881      0.622\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      72/99      3.65G    0.02638    0.02363   0.002459        114        640: 1\n",
      "tensor([0.60553], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.913      0.821      0.881       0.62\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      73/99      3.65G    0.02654    0.02402   0.002604        141        640: 1\n",
      "tensor([0.73110], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.911       0.82      0.882      0.622\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      74/99      3.65G    0.02632    0.02387   0.002496        133        640: 1\n",
      "tensor([0.60088], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.935      0.807      0.886      0.625\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      75/99      3.65G    0.02597    0.02335   0.002408        159        640: 1\n",
      "tensor([0.75045], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.922      0.815      0.883      0.628\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      76/99      3.65G    0.02613    0.02336   0.002461        122        640: 1\n",
      "tensor([0.61219], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.93      0.824      0.884       0.63\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      77/99      3.65G    0.02605    0.02365   0.002454        137        640: 1\n",
      "tensor([0.69006], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.912      0.829       0.88      0.631\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      78/99      3.65G    0.02595    0.02313   0.002439        137        640: 1\n",
      "tensor([0.66274], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.935      0.816      0.879      0.628\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      79/99      3.65G    0.02544     0.0227   0.002431        161        640: 1\n",
      "tensor([0.77197], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.939      0.826      0.884      0.631\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      80/99      3.65G    0.02558    0.02317   0.002364        154        640: 1\n",
      "tensor([0.61898], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.923      0.832      0.888      0.634\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      81/99      3.65G    0.02543    0.02284   0.002342        181        640: 1\n",
      "tensor([0.78805], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.928      0.819      0.888       0.63\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      82/99      3.65G    0.02531    0.02273   0.002352        149        640: 1\n",
      "tensor([0.65280], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.928      0.816       0.89      0.634\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      83/99      3.65G    0.02519     0.0228   0.002253        118        640: 1\n",
      "tensor([0.59273], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.935      0.825      0.893      0.642\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      84/99      3.65G    0.02503    0.02252   0.002272        178        640: 1\n",
      "tensor([0.77202], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.93      0.836       0.89      0.638\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      85/99      3.65G    0.02495    0.02258   0.002223        140        640: 1\n",
      "tensor([0.67576], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.93      0.835      0.883      0.634\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      86/99      3.65G     0.0251    0.02269    0.00232        119        640: 1\n",
      "tensor([0.55480], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.929      0.834      0.887      0.637\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      87/99      3.65G    0.02495    0.02293   0.002239        195        640:  "
     ]
    }
   ],
   "source": [
    "# Baseline: the original 100-epoch training run, without any fog augmentation.\n",
    "# Enabling SI_enable alone only turns on SI recording; if SI_pt is not\n",
    "# supplied, the SI loss is not computed.\n",
    "#\n",
    "# Note: the trailing backslashes inside the triple-quoted string escape the\n",
    "# newlines, so the shell receives all flags on a single command line.\n",
    "command = \"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_SI.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_kitti.yaml \\\n",
    "--data data/kitti.yaml \\\n",
    "--epochs 100 \\\n",
    "--name fog_0_time\n",
    "\"\"\"\n",
    "!{command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "85753b6b-e137-4e75-89ca-4d37ba6981e0",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
