{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "10cf84d9-cabb-45b8-bcf0-18e6f9ad69a6",
   "metadata": {},
   "source": [
    "这里应该是就当作已经有数据集了，也不需要处理了对吧\n",
    "\n",
    "然后我也不清楚使用到的数据集格式是啥样的，data的yaml文件应该是甲方提供吗"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4ffc0458-cfba-42b1-8700-5ad33cff8c9d",
   "metadata": {},
   "source": [
    "1、训练baseline（场景针对性模型）\n",
    "\n",
    "(1)、用初始的数据集训练baseline"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "c670d5ed-a8ef-40b3-acc5-5dcfce3552a3",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/root/autodl-tmp/yolo_incremental_learning/utils/general.py:32: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. Refrain from using this package or pin to Setuptools<81.\n",
      "  import pkg_resources as pkg\n",
      "\u001b[34m\u001b[1mtrain_final: \u001b[0mweights=yolov5s.pt, cfg=models/yolov5s_kitti.yaml, data=data/kitti.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=1, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=Baseline, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=True, SI_pt=None, SI_lambda=10.0, Lwf_enable=False, Lwf_lambda=[], Lwf_temperature=1.0, PODNet_enable=False, Distillation_layers=[17, 20, 23], POD_lambda=1.0, Old_models=[], DER_enable=False, DER_old_model=[]\n",
      "Command 'git fetch ultralytics' timed out after 5 seconds\n",
      "YOLOv5 🚀 314daedd Python-3.12.3 torch-2.5.1+cu124 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mComet: \u001b[0mrun 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "extractors长度： 0\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo_final.Detect                [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_kitti summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "Transferred 342/349 items from yolov5s.pt\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/train... 4189 images, 0 b\u001b[0m\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /root/autodl-tmp/datasets/kitti/labels/train.cache\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/val.cache... 1047 images, 0\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.81 anchors/target, 0.999 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/Baseline/labels.jpg... \n",
      "/root/autodl-tmp/yolo_incremental_learning/train_final.py:407: FutureWarning: `torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead.\n",
      "  scaler = torch.cuda.amp.GradScaler(enabled=amp)\n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/Baseline\u001b[0m\n",
      "Starting training for 1 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "        0/0      3.65G    0.08043    0.04745    0.03265         85        640: 1\n",
      "tensor([1.67814], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1047       5769      0.736      0.127      0.117     0.0414\n",
      "\n",
      "1 epochs completed in 0.010 hours.\n",
      "Optimizer stripped from runs/train/Baseline/weights/last.pt, 14.3MB\n",
      "Optimizer stripped from runs/train/Baseline/weights/best.pt, 14.3MB\n",
      "\n",
      "Validating runs/train/Baseline/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1047       5769      0.736      0.127      0.117     0.0414\n",
      "                   Car       1047       4076      0.327      0.724      0.482      0.181\n",
      "                   Van       1047        423          0          0     0.0356     0.0136\n",
      "                 Truck       1047        154          1          0     0.0131    0.00565\n",
      "                  Tram       1047         70          1          0     0.0014   0.000607\n",
      "            Pedestrian       1047        666      0.562       0.29      0.368      0.119\n",
      "        Person_sitting       1047         23          1          0    0.00151   0.000607\n",
      "               Cyclist       1047        219          1          0     0.0341    0.00884\n",
      "                  Misc       1047        138          1          0    0.00416    0.00134\n",
      "Results saved to \u001b[1mruns/train/Baseline\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "# Train the baseline (scene-specific model) on the initial dataset (KITTI).\n",
    "# --exist-ok pins the run directory to runs/train/Baseline; the later\n",
    "# incremental steps are keyed on the \"Baseline\" name, and without this flag\n",
    "# re-running the cell would drift to Baseline2, Baseline3, ...\n",
    "command = \"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_final.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_kitti.yaml \\\n",
    "--data data/kitti.yaml \\\n",
    "--epochs 1 \\\n",
    "--name Baseline \\\n",
    "--SI_enable \\\n",
    "--exist-ok \\\n",
    "\"\"\"\n",
    "!{command}\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9e5d1468-32bc-47d3-b8e8-274df11d826d",
   "metadata": {},
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "2ae94fee-129a-4df7-bc81-66d4d1dd2982",
   "metadata": {},
   "source": [
    "(2)、第二个数据集的baseline（场景针对性模型）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "6d898370-391b-4eb4-8a04-42f233978b27",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/root/autodl-tmp/yolo_incremental_learning/utils/general.py:32: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. Refrain from using this package or pin to Setuptools<81.\n",
      "  import pkg_resources as pkg\n",
      "\u001b[34m\u001b[1mtrain_final: \u001b[0mweights=yolov5s.pt, cfg=models/yolov5s_VOCKITTI.yaml, data=data/VOCKITTI.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=1, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=Baseline_2, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=False, Lwf_lambda=[], Lwf_temperature=1.0, PODNet_enable=False, Distillation_layers=[17, 20, 23], POD_lambda=1.0, Old_models=[], DER_enable=False, DER_old_model=[]\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2903 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 314daedd Python-3.12.3 torch-2.5.1+cu124 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mComet: \u001b[0mrun 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "extractors长度： 0\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83607  models.yolo_final.Detect                [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7089751 parameters, 7089751 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/349 items from yolov5s.pt\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/train2007.cache... 16551 im\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 images\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m3.99 anchors/target, 1.000 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/Baseline_2/labels.jpg... \n",
      "/root/autodl-tmp/yolo_incremental_learning/train_final.py:407: FutureWarning: `torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead.\n",
      "  scaler = torch.cuda.amp.GradScaler(enabled=amp)\n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/Baseline_2\u001b[0m\n",
      "Starting training for 1 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "        0/0      3.47G    0.05998    0.03479    0.04807         36        640: 1\n",
      "tensor([0.73082], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.662      0.683      0.716      0.393\n",
      "\n",
      "1 epochs completed in 0.034 hours.\n",
      "Optimizer stripped from runs/train/Baseline_2/weights/last.pt, 14.5MB\n",
      "Optimizer stripped from runs/train/Baseline_2/weights/best.pt, 14.5MB\n",
      "\n",
      "Validating runs/train/Baseline_2/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.662      0.681      0.715      0.393\n",
      "                   car       4952       1201      0.609      0.893      0.854      0.496\n",
      "                person       4952       4528      0.709      0.842      0.856      0.488\n",
      "             aeroplane       4952        285      0.674      0.702      0.722      0.371\n",
      "               bicycle       4952        337      0.777      0.736      0.814      0.411\n",
      "                  bird       4952        459      0.533      0.667      0.619      0.275\n",
      "                  boat       4952        263       0.37      0.563      0.444      0.189\n",
      "                bottle       4952        469      0.454      0.769      0.649      0.353\n",
      "                   bus       4952        213      0.941      0.671       0.86      0.574\n",
      "                   cat       4952        358       0.78      0.757      0.803      0.457\n",
      "                 chair       4952        756      0.502      0.636      0.586      0.301\n",
      "                   cow       4952        244       0.74      0.635      0.716      0.422\n",
      "           diningtable       4952        206      0.851      0.415      0.699      0.414\n",
      "                   dog       4952        489      0.732      0.652      0.736      0.396\n",
      "                 horse       4952        348      0.813       0.75      0.823      0.428\n",
      "             motorbike       4952        325      0.879      0.649      0.839      0.451\n",
      "           pottedplant       4952        480      0.457      0.446       0.43      0.199\n",
      "                 sheep       4952        242      0.495      0.773      0.714      0.387\n",
      "                  sofa       4952        239      0.656      0.454      0.569      0.375\n",
      "                 train       4952        282      0.742      0.755      0.776      0.407\n",
      "             tvmonitor       4952        308      0.528      0.857        0.8       0.46\n",
      "Results saved to \u001b[1mruns/train/Baseline_2\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "# Train a baseline on the second (new) dataset alone.\n",
    "# NOTE: the data yaml here may list only the new dataset's classes; it does\n",
    "# not need to include the old dataset's classes.\n",
    "# --exist-ok pins the run directory to runs/train/Baseline_2 across re-runs.\n",
    "command = \"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_final.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_VOCKITTI.yaml \\\n",
    "--data data/VOCKITTI.yaml \\\n",
    "--epochs 1 \\\n",
    "--name Baseline_2 \\\n",
    "--exist-ok \\\n",
    "\"\"\"\n",
    "!{command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5c843387-dcb1-4fd7-a76e-6e92ff01fc21",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "539a10b8-a28d-4f3b-9ce1-783093a4419b",
   "metadata": {},
   "source": [
    "2、增量训练"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2cacfae8-3942-4077-ae68-c6776d680cc2",
   "metadata": {},
   "source": [
    "(1)、不使用增量方法进行增量训练（用于体现增量方法效果的一种baseline）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "a8f71d31-dbe6-4ec7-a0ad-a4e998993a9c",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/root/autodl-tmp/yolo_incremental_learning/utils/general.py:32: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. Refrain from using this package or pin to Setuptools<81.\n",
      "  import pkg_resources as pkg\n",
      "\u001b[34m\u001b[1mtrain_final: \u001b[0mweights=./runs/train/Baseline/weights/best.pt, cfg=models/yolov5s_VOCKITTI.yaml, data=data/VOCKITTI.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=1, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=n_r_baseline, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=False, Lwf_lambda=[], Lwf_temperature=1.0, PODNet_enable=False, Distillation_layers=[17, 20, 23], POD_lambda=1.0, Old_models=[], DER_enable=False, DER_old_model=[]\n",
      "Command 'git fetch ultralytics' timed out after 5 seconds\n",
      "YOLOv5 🚀 314daedd Python-3.12.3 torch-2.5.1+cu124 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mComet: \u001b[0mrun 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "extractors长度： 0\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83607  models.yolo_final.Detect                [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7089751 parameters, 7089751 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/349 items from runs/train/Baseline/weights/best.pt\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/train2007.cache... 16551 im\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 images\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m3.99 anchors/target, 1.000 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/n_r_baseline4/labels.jpg... \n",
      "/root/autodl-tmp/yolo_incremental_learning/train_final.py:407: FutureWarning: `torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead.\n",
      "  scaler = torch.cuda.amp.GradScaler(enabled=amp)\n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/n_r_baseline4\u001b[0m\n",
      "Starting training for 1 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "        0/0      3.47G    0.08825    0.04357    0.07711        116        640:  error: RPC failed; curl 16 Error in the HTTP2 framing layer\n",
      "fatal: expected flush after ref listing\n",
      "        0/0      3.47G     0.0577    0.03515    0.05043         36        640: 1\n",
      "tensor([0.73745], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.639      0.682      0.694      0.375\n",
      "\n",
      "1 epochs completed in 0.036 hours.\n",
      "Optimizer stripped from runs/train/n_r_baseline4/weights/last.pt, 14.5MB\n",
      "Optimizer stripped from runs/train/n_r_baseline4/weights/best.pt, 14.5MB\n",
      "\n",
      "Validating runs/train/n_r_baseline4/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.639      0.682      0.694      0.375\n",
      "                   car       4952       1201      0.571      0.901      0.859      0.495\n",
      "                person       4952       4528      0.614      0.871      0.831      0.484\n",
      "             aeroplane       4952        285      0.744      0.653      0.706      0.275\n",
      "               bicycle       4952        337      0.658      0.745      0.729       0.37\n",
      "                  bird       4952        459      0.555      0.717      0.681      0.312\n",
      "                  boat       4952        263      0.517       0.51      0.492      0.222\n",
      "                bottle       4952        469      0.458      0.757      0.673      0.324\n",
      "                   bus       4952        213      0.958      0.539      0.771      0.537\n",
      "                   cat       4952        358      0.643      0.768      0.726       0.42\n",
      "                 chair       4952        756      0.456      0.685      0.594      0.308\n",
      "                   cow       4952        244      0.814      0.484      0.678      0.394\n",
      "           diningtable       4952        206      0.846      0.426      0.666      0.356\n",
      "                   dog       4952        489      0.682      0.703      0.744      0.452\n",
      "                 horse       4952        348      0.862      0.793      0.866      0.425\n",
      "             motorbike       4952        325      0.794      0.723      0.784      0.419\n",
      "           pottedplant       4952        480       0.35        0.5      0.371       0.16\n",
      "                 sheep       4952        242      0.523       0.76       0.68      0.395\n",
      "                  sofa       4952        239      0.647      0.477      0.561       0.33\n",
      "                 train       4952        282      0.636      0.768       0.71      0.391\n",
      "             tvmonitor       4952        308       0.45      0.864      0.761      0.427\n",
      "Results saved to \u001b[1mruns/train/n_r_baseline4\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "model = 'n_r_baseline'\n",
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_final.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_VOCKITTI.yaml \\\n",
    "--data data/VOCKITTI.yaml \\\n",
    "--epochs 1 \\\n",
    "--weights ./runs/train/Baseline/weights/best.pt \\\n",
    "--name {model} \\\n",
    "\"\"\"\n",
    "!{command}\n",
    "# 这里用的数据集是单纯的新数据集，但是data的yaml文件是包括新旧数据集的所有类别的\n",
    "# 这里用数据集一训练的模型，直接在数据集二上进行增量训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "537efd97-1a03-4b5f-8a5b-022cbc0cc604",
   "metadata": {},
   "outputs": [],
   "source": [
    "# baseline\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti_increment.yaml \\\n",
    "--weights ./runs/train/{model}/weights/best.pt \\\n",
    "--task test &&\\\n",
    "echo 'KITTI val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "# 增量训练环节需要测试模型在旧数据集上的表现，判断对灾难性遗忘的环节能力\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "58485507-2884-4520-963d-0f0b021ee7c7",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "0901d146-7267-4ee3-860e-dbf916d99f52",
   "metadata": {},
   "source": [
    "(2)、使用EWC增量训练（这里不需要在新数据集中混入一定的旧数据）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "a43b00c3-80b5-4ebf-abcc-c687de982aca",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/root/autodl-tmp/yolo_incremental_learning/utils/general.py:32: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. Refrain from using this package or pin to Setuptools<81.\n",
      "  import pkg_resources as pkg\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo_final.Detect                [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_kitti summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "/root/autodl-tmp/yolo_incremental_learning/utils/autobatch.py:15: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with torch.cuda.amp.autocast(amp):\n",
      "\u001b[34m\u001b[1mAutoBatch: \u001b[0mComputing optimal batch size for --imgsz 640\n",
      "\u001b[34m\u001b[1mAutoBatch: \u001b[0mCUDA:0 (NVIDIA GeForce RTX 3090) 23.57G total, 0.10G reserved, 0.05G allocated, 23.42G free\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "      Params      GFLOPs  GPU_mem (GB)  forward (ms) backward (ms)                   input                  output\n",
      "     7041205       16.01         0.258         15.16          44.5        (1, 3, 640, 640)                    list\n",
      "     7041205       32.01         0.445         10.65         19.09        (2, 3, 640, 640)                    list\n",
      "     7041205       64.02         0.879         11.97         22.27        (4, 3, 640, 640)                    list\n",
      "     7041205         128         1.567         18.91          34.4        (8, 3, 640, 640)                    list\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mAutoBatch: \u001b[0mUsing batch-size 97 for CUDA:0 18.88G/23.57G (80%) ✅\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "     7041205       256.1         3.158          34.4         48.48       (16, 3, 640, 640)                    list\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning ../datasets/kitti/labels/train.cache... 4189 images, 0 backgrounds, 0 corrupt: 100%|██████████| 4189/4189 [00:00<?, ?it/s]\n",
      "/root/autodl-tmp/yolo_incremental_learning/EWC_module/fisher.py:160: FutureWarning: `torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead.\n",
      "  scaler = torch.cuda.amp.GradScaler(enabled=amp)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "fisher context saved at runs/train/Baseline/weights/fisher.pt\n",
      "/root/autodl-tmp/yolo_incremental_learning/utils/general.py:32: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. Refrain from using this package or pin to Setuptools<81.\n",
      "  import pkg_resources as pkg\n",
      "\u001b[34m\u001b[1mtrain_final: \u001b[0mweights=./runs/train/Baseline/weights/best.pt, cfg=models/yolov5s_VOCKITTI.yaml, data=data/VOCKITTI.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=1, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=EWC, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=runs/train/Baseline/weights/fisher.pt, ewc_lambda=0.001, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=False, Lwf_lambda=[], Lwf_temperature=1.0, PODNet_enable=False, Distillation_layers=[17, 20, 23], POD_lambda=1.0, Old_models=[], DER_enable=False, DER_old_model=[]\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2903 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 314daedd Python-3.12.3 torch-2.5.1+cu124 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mComet: \u001b[0mrun 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "extractors长度： 0\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83607  models.yolo_final.Detect                [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7089751 parameters, 7089751 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/349 items from runs/train/Baseline/weights/best.pt\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/train2007.cache... 16551 im\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 images\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m3.99 anchors/target, 1.000 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/EWC/labels.jpg... \n",
      "/root/autodl-tmp/yolo_incremental_learning/train_final.py:407: FutureWarning: `torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead.\n",
      "  scaler = torch.cuda.amp.GradScaler(enabled=amp)\n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/EWC\u001b[0m\n",
      "Starting training for 1 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "        0/0      3.53G    0.05768    0.03516    0.05042         36        640: 1\n",
      "tensor([0.72354], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.631      0.678       0.69      0.396\n",
      "\n",
      "1 epochs completed in 0.075 hours.\n",
      "Optimizer stripped from runs/train/EWC/weights/last.pt, 14.5MB\n",
      "Optimizer stripped from runs/train/EWC/weights/best.pt, 14.5MB\n",
      "\n",
      "Validating runs/train/EWC/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   "
     ]
    }
   ],
   "source": [
    "from EWC_module.fisher import cal_fisher\n",
    "cal_fisher('./runs/train/Baseline/weights/best.pt')\n",
    "model = 'EWC'\n",
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_final.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_VOCKITTI.yaml \\\n",
    "--data data/VOCKITTI.yaml \\\n",
    "--epochs 1 \\\n",
    "--weights ./runs/train/Baseline/weights/best.pt \\\n",
    "--ewc_pt runs/train/Baseline/weights/fisher.pt \\\n",
    "--ewc_lambda 1e-3 \\\n",
    "--name {model} \\\n",
    "\"\"\"\n",
    "!{command}\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cf0dcd72-d8fd-451a-9cfa-c6321ff6b229",
   "metadata": {},
   "outputs": [],
   "source": [
    "# EWC\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti_increment.yaml \\\n",
    "--weights ./runs/train/{model}/weights/best.pt \\\n",
    "--task test &&\\\n",
    "echo 'KITTI val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "# 原始的EWC并不太适合CIL任务\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9cc4ccf6-8cd3-4d8b-be27-188a26e4008d",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "9a31a12f-a4ae-4847-a844-670dd4fa5e37",
   "metadata": {},
   "source": [
    "(3)、使用SI增量训练（这里不需要在新数据集中混入一定的旧数据）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "44de6497-7f0e-4ac4-8e53-520672761332",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/root/autodl-tmp/yolo_incremental_learning/utils/general.py:32: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. Refrain from using this package or pin to Setuptools<81.\n",
      "  import pkg_resources as pkg\n",
      "\u001b[34m\u001b[1mtrain_final: \u001b[0mweights=./runs/train/Baseline/weights/best.pt, cfg=models/yolov5s_VOCKITTI.yaml, data=data/VOCKITTI.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=1, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=SI, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=True, SI_pt=./runs/train/Baseline/weights/si.pt, SI_lambda=0.1, Lwf_enable=False, Lwf_lambda=[], Lwf_temperature=1.0, PODNet_enable=False, Distillation_layers=[17, 20, 23], POD_lambda=1.0, Old_models=[], DER_enable=False, DER_old_model=[]\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2903 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 314daedd Python-3.12.3 torch-2.5.1+cu124 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mComet: \u001b[0mrun 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "extractors长度： 0\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83607  models.yolo_final.Detect                [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7089751 parameters, 7089751 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/349 items from runs/train/Baseline/weights/best.pt\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/train2007.cache... 16551 im\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 images\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m3.99 anchors/target, 1.000 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/SI3/labels.jpg... \n",
      "/root/autodl-tmp/yolo_incremental_learning/train_final.py:407: FutureWarning: `torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead.\n",
      "  scaler = torch.cuda.amp.GradScaler(enabled=amp)\n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/SI3\u001b[0m\n",
      "Starting training for 1 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "        0/0      3.61G    0.05775    0.03513    0.05037         36        640: 1\n",
      "tensor([0.72718], device='cuda:0', grad_fn=<AddBackward0>)  tensor([0.00824], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.653      0.667      0.697      0.394\n",
      "\n",
      "1 epochs completed in 0.081 hours.\n",
      "Optimizer stripped from runs/train/SI3/weights/last.pt, 14.5MB\n",
      "Optimizer stripped from runs/train/SI3/weights/best.pt, 14.5MB\n",
      "\n",
      "Validating runs/train/SI3/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.653      0.667      0.697      0.394\n",
      "                   car       4952       1201      0.497      0.883      0.774       0.48\n",
      "                person       4952       4528      0.607       0.86      0.823      0.487\n",
      "             aeroplane       4952        285      0.761      0.683      0.756      0.364\n",
      "               bicycle       4952        337      0.731      0.732      0.764      0.442\n",
      "                  bird       4952        459      0.632       0.71      0.715      0.386\n",
      "                  boat       4952        263      0.491      0.463      0.458      0.188\n",
      "                bottle       4952        469      0.518      0.738      0.692      0.361\n",
      "                   bus       4952        213      0.974      0.529      0.777      0.544\n",
      "                   cat       4952        358      0.737      0.765      0.795      0.472\n",
      "                 chair       4952        756      0.418      0.676      0.583      0.337\n",
      "                   cow       4952        244      0.756      0.457      0.626      0.348\n",
      "           diningtable       4952        206      0.871      0.392      0.655      0.356\n",
      "                   dog       4952        489      0.553      0.673       0.67      0.412\n",
      "                 horse       4952        348      0.864      0.751      0.859      0.491\n",
      "             motorbike       4952        325      0.857      0.698       0.83      0.441\n",
      "           pottedplant       4952        480      0.357      0.517      0.343       0.14\n",
      "                 sheep       4952        242      0.481      0.748      0.679      0.379\n",
      "                  sofa       4952        239       0.69      0.456      0.558      0.371\n",
      "                 train       4952        282      0.817      0.745      0.816      0.457\n",
      "             tvmonitor       4952        308      0.441      0.864      0.763       0.43\n",
      "Results saved to \u001b[1mruns/train/SI3\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "model = 'SI'\n",
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_final.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_VOCKITTI.yaml \\\n",
    "--data data/VOCKITTI.yaml \\\n",
    "--epochs 1 \\\n",
    "--weights ./runs/train/Baseline/weights/best.pt \\\n",
    "--SI_enable \\\n",
    "--SI_pt ./runs/train/Baseline/weights/si.pt \\\n",
    "--SI_lambda 1e-1 \\\n",
    "--name {model} \\\n",
    "\"\"\"\n",
    "!{command}\n",
    "# --weights ./runs/train/exp3/weights/best.pt \\\n",
    "# 1.0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "7c49f58b-250a-4dab-97ed-1471ff558eea",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/root/autodl-tmp/yolo_incremental_learning/utils/general.py:32: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. Refrain from using this package or pin to Setuptools<81.\n",
      "  import pkg_resources as pkg\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti_increment.yaml, weights=['./runs/train/SI3/weights/best.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 314daedd Python-3.12.3 torch-2.5.1+cu124 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2245 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2245      11978      0.884      0.119      0.142     0.0629\n",
      "                   car       2245       8402      0.636       0.52      0.586      0.258\n",
      "                   van       2245        871          1          0     0.0356     0.0192\n",
      "                 truck       2245        351          1          0     0.0183    0.00975\n",
      "                  tram       2245        156          1          0      0.104     0.0484\n",
      "                person       2245       1347      0.435      0.434      0.384      0.165\n",
      "        person_sitting       2245         91          1          0   0.000527   0.000165\n",
      "               cyclist       2245        468          1          0    0.00158   0.000433\n",
      "                  misc       2245        292          1          0    0.00665    0.00294\n",
      "Speed: 0.0ms pre-process, 0.9ms inference, 1.9ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp25\u001b[0m\n",
      "KITTI val successfully!\n"
     ]
    }
   ],
   "source": [
    "# SI\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti_increment.yaml \\\n",
    "--weights ./runs/train/{model}/weights/best.pt \\\n",
    "--task test &&\\\n",
    "echo 'KITTI val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "# 原始的SI也不太适合CIL任务\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f23bafd0-27e0-4ea2-b5ee-fa2f8240dc5b",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "0b67c78a-75d8-489a-aefa-abb650c430f8",
   "metadata": {},
   "source": [
    "(4)、使用LwF增量训练（这里不需要在新数据集中混入一定的旧数据）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "019a05dd-526c-4db8-ad1c-89ed8bd3a8ff",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/root/autodl-tmp/yolo_incremental_learning/utils/general.py:32: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. Refrain from using this package or pin to Setuptools<81.\n",
      "  import pkg_resources as pkg\n",
      "\u001b[34m\u001b[1mtrain_final: \u001b[0mweights=./runs/train/Baseline/weights/best.pt, cfg=models/yolov5s_VOCKITTI.yaml, data=data/VOCKITTI.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=1, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=LwF, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=True, Lwf_lambda=[0.001], Lwf_temperature=1.0, PODNet_enable=False, Distillation_layers=[17, 20, 23], POD_lambda=1.0, Old_models=['./runs/train/Baseline/weights/last.pt'], DER_enable=False, DER_old_model=[]\n",
      "Command 'git fetch ultralytics' timed out after 5 seconds\n",
      "YOLOv5 🚀 314daedd Python-3.12.3 torch-2.5.1+cu124 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mComet: \u001b[0mrun 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "extractors长度： 0\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83607  models.yolo_final.Detect                [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7089751 parameters, 7089751 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/349 items from runs/train/Baseline/weights/best.pt\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/train2007.cache... 16551 im\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 images\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m3.99 anchors/target, 1.000 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/LwF/labels.jpg... \n",
      "/root/autodl-tmp/yolo_incremental_learning/train_final.py:407: FutureWarning: `torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead.\n",
      "  scaler = torch.cuda.amp.GradScaler(enabled=amp)\n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/LwF\u001b[0m\n",
      "Starting training for 1 epochs...\n",
      "Overriding model.yaml nc=26 with nc=8\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo_final.Detect                [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "        0/0      3.72G    0.06188    0.03712    0.06142         77        640:  fatal: unable to access 'https://github.com/ultralytics/yolov5/': Failed to connect to github.com port 443 after 129505 ms: Connection timed out\n",
      "        0/0      3.72G    0.05898    0.03604    0.05755         36        640: 1\n",
      "tensor([1.41603], device='cuda:0', grad_fn=<AddBackward0>) tensor(546.53998, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.636      0.591      0.626       0.34\n",
      "\n",
      "1 epochs completed in 0.057 hours.\n",
      "Optimizer stripped from runs/train/LwF/weights/last.pt, 14.5MB\n",
      "Optimizer stripped from runs/train/LwF/weights/best.pt, 14.5MB\n",
      "\n",
      "Validating runs/train/LwF/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.636      0.592      0.626       0.34\n",
      "                   car       4952       1201      0.546      0.826      0.783      0.441\n",
      "                person       4952       4528      0.684      0.772      0.766      0.431\n",
      "             aeroplane       4952        285      0.702      0.695      0.713      0.361\n",
      "               bicycle       4952        337      0.785      0.596       0.73      0.407\n",
      "                  bird       4952        459       0.66      0.627      0.647      0.314\n",
      "                  boat       4952        263      0.579      0.297      0.337      0.139\n",
      "                bottle       4952        469       0.54      0.708      0.665      0.326\n",
      "                   bus       4952        213      0.962      0.404      0.738      0.487\n",
      "                   cat       4952        358      0.629       0.79      0.734      0.428\n",
      "                 chair       4952        756      0.374      0.701      0.587      0.329\n",
      "                   cow       4952        244      0.564      0.434       0.48      0.245\n",
      "           diningtable       4952        206      0.663      0.228      0.434      0.248\n",
      "                   dog       4952        489      0.553      0.724      0.686      0.342\n",
      "                 horse       4952        348       0.87      0.718      0.818      0.451\n",
      "             motorbike       4952        325      0.841      0.375      0.586       0.28\n",
      "           pottedplant       4952        480      0.332      0.525      0.322      0.137\n",
      "                 sheep       4952        242      0.584      0.686      0.669      0.384\n",
      "                  sofa       4952        239      0.561      0.326      0.364      0.258\n",
      "                 train       4952        282      0.792      0.613      0.711      0.357\n",
      "             tvmonitor       4952        308      0.492      0.786      0.743      0.425\n",
      "Results saved to \u001b[1mruns/train/LwF\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "model = 'LwF'\n",
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_final.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_VOCKITTI.yaml \\\n",
    "--data data/VOCKITTI.yaml \\\n",
    "--epochs 1 \\\n",
    "--weights ./runs/train/Baseline/weights/best.pt \\\n",
    "--Lwf_enable \\\n",
    "--Lwf_temperature 1.0 \\\n",
    "--Lwf_lambda 1e-3 \\\n",
    "--Old_models \\\n",
    "    ./runs/train/Baseline/weights/last.pt \\\n",
    "--name {model} \\\n",
    "\"\"\"\n",
    "!{command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "a435eb85-d54a-4b6e-aae5-73f4a989c4dc",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/root/autodl-tmp/yolo_incremental_learning/utils/general.py:32: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. Refrain from using this package or pin to Setuptools<81.\n",
      "  import pkg_resources as pkg\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti_increment.yaml, weights=['./runs/train/LwF/weights/best.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 314daedd Python-3.12.3 torch-2.5.1+cu124 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2245 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2245      11978      0.894       0.13      0.154     0.0647\n",
      "                   car       2245       8402       0.72      0.539      0.647      0.285\n",
      "                   van       2245        871          1          0     0.0421      0.021\n",
      "                 truck       2245        351          1          0     0.0243    0.00913\n",
      "                  tram       2245        156          1          0    0.00342    0.00141\n",
      "                person       2245       1347      0.431        0.5      0.447      0.184\n",
      "        person_sitting       2245         91          1          0      0.026    0.00447\n",
      "               cyclist       2245        468          1          0     0.0348    0.00982\n",
      "                  misc       2245        292          1          0    0.00578    0.00277\n",
      "Speed: 0.0ms pre-process, 0.9ms inference, 2.6ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp27\u001b[0m\n",
      "KITTI val successfully!\n"
     ]
    }
   ],
   "source": [
    "# LwF\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti_increment.yaml \\\n",
    "--weights ./runs/train/{model}/weights/best.pt \\\n",
    "--task test &&\\\n",
    "echo 'KITTI val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "# Lwf能缓解灾难性遗忘，但是在提高旧数据集效果的时候会导致新数据集效果变差。\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7ec6dbfd-7deb-433d-aed6-b0b33654dc9f",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "f7879e60-dff4-4d4f-92ee-36c926e9479d",
   "metadata": {},
   "source": [
    "(5)、在新数据集中混入一定旧数据集，且不使用增量方法训练的模型（这里需要在新数据集中混入一定的旧数据）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "0aeab51b-e708-447d-bb47-cc5319161700",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/root/autodl-tmp/yolo_incremental_learning/utils/general.py:32: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. Refrain from using this package or pin to Setuptools<81.\n",
      "  import pkg_resources as pkg\n",
      "\u001b[34m\u001b[1mtrain_final: \u001b[0mweights=./runs/train/Baseline/weights/best.pt, cfg=models/yolov5s_VOCKITTI.yaml, data=data/VOCKITTIBiC_base.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=1, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=r_baseline, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=False, Lwf_lambda=[], Lwf_temperature=1.0, PODNet_enable=False, Distillation_layers=[17, 20, 23], POD_lambda=1.0, Old_models=[], DER_enable=False, DER_old_model=[]\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2903 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 314daedd Python-3.12.3 torch-2.5.1+cu124 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mComet: \u001b[0mrun 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "extractors长度： 0\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83607  models.yolo_final.Detect                [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7089751 parameters, 7089751 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/349 items from runs/train/Baseline/weights/best.pt\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/kitti_old.cache... 19692 im\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 images\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.23 anchors/target, 1.000 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/r_baseline/labels.jpg... \n",
      "/root/autodl-tmp/yolo_incremental_learning/train_final.py:407: FutureWarning: `torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead.\n",
      "  scaler = torch.cuda.amp.GradScaler(enabled=amp)\n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/r_baseline\u001b[0m\n",
      "Starting training for 1 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "        0/0      3.47G    0.05911    0.03705    0.04689         90        640: 1\n",
      "tensor([1.39038], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.683      0.672      0.718      0.414\n",
      "\n",
      "1 epochs completed in 0.058 hours.\n",
      "Optimizer stripped from runs/train/r_baseline/weights/last.pt, 14.5MB\n",
      "Optimizer stripped from runs/train/r_baseline/weights/best.pt, 14.5MB\n",
      "\n",
      "Validating runs/train/r_baseline/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.683      0.672      0.718      0.414\n",
      "                   car       4952       1201      0.658      0.885      0.867      0.529\n",
      "                person       4952       4528      0.641      0.895      0.874        0.5\n",
      "             aeroplane       4952        285      0.757      0.716       0.77      0.403\n",
      "               bicycle       4952        337      0.819      0.753      0.828      0.497\n",
      "                  bird       4952        459      0.627      0.699      0.704      0.362\n",
      "                  boat       4952        263       0.73      0.433      0.539      0.256\n",
      "                bottle       4952        469      0.538       0.74      0.664      0.353\n",
      "                   bus       4952        213      0.981      0.477      0.772      0.554\n",
      "                   cat       4952        358      0.721      0.809      0.806      0.496\n",
      "                 chair       4952        756      0.445      0.714      0.626      0.362\n",
      "                   cow       4952        244      0.788      0.228      0.556      0.353\n",
      "           diningtable       4952        206      0.774      0.422      0.584      0.321\n",
      "                   dog       4952        489      0.749      0.724      0.797      0.474\n",
      "                 horse       4952        348      0.715      0.819      0.852      0.485\n",
      "             motorbike       4952        325      0.819      0.695      0.799      0.447\n",
      "           pottedplant       4952        480      0.347      0.594      0.456      0.211\n",
      "                 sheep       4952        242      0.649      0.674      0.697      0.389\n",
      "                  sofa       4952        239      0.538      0.536       0.55      0.335\n",
      "                 train       4952        282      0.809      0.766       0.82      0.479\n",
      "             tvmonitor       4952        308      0.546      0.857      0.804      0.467\n",
      "Results saved to \u001b[1mruns/train/r_baseline\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "model = 'r_baseline'\n",
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_final.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_VOCKITTI.yaml \\\n",
    "--data data/VOCKITTIBiC_base.yaml \\\n",
    "--epochs 1 \\\n",
    "--weights ./runs/train/Baseline/weights/best.pt \\\n",
    "--name {model} \\\n",
    "\"\"\"\n",
    "!{command}\n",
    "\n",
    "# 这里的数据集是混入一定旧数据的"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "82f78faf-7cd4-459c-91ae-31d374c39587",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/root/autodl-tmp/yolo_incremental_learning/utils/general.py:32: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. Refrain from using this package or pin to Setuptools<81.\n",
      "  import pkg_resources as pkg\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti_increment.yaml, weights=['./runs/train/r_baseline/weights/best.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 314daedd Python-3.12.3 torch-2.5.1+cu124 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2245 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2245      11978       0.93      0.148      0.224      0.108\n",
      "                   car       2245       8402      0.793      0.718      0.794      0.422\n",
      "                   van       2245        871          1          0     0.0597     0.0278\n",
      "                 truck       2245        351          1          0      0.295      0.132\n",
      "                  tram       2245        156          1          0     0.0878     0.0469\n",
      "                person       2245       1347      0.643      0.466      0.492      0.209\n",
      "        person_sitting       2245         91          1          0   0.000552   0.000338\n",
      "               cyclist       2245        468          1          0     0.0511     0.0174\n",
      "                  misc       2245        292          1          0      0.016    0.00632\n",
      "Speed: 0.0ms pre-process, 0.8ms inference, 1.8ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp28\u001b[0m\n",
      "KITTI val successfully!\n"
     ]
    }
   ],
   "source": [
    "# With replay: evaluate the replay-baseline model on the KITTI increment test split.\n",
    "# NOTE: depends on `model` being set by the training cell above ('r_baseline') —\n",
    "# hidden cross-cell state; re-run that cell first on a fresh kernel.\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti_increment.yaml \\\n",
    "--weights ./runs/train/{model}/weights/best.pt \\\n",
    "--task test &&\\\n",
    "echo 'KITTI val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "075e6626-18e9-4290-ac3d-1beca33ac4ba",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "d3e1bf53-0451-4595-b39b-f1a73d7732a1",
   "metadata": {},
   "source": [
    "(6)、使用DER增量训练（这里需要在新数据集中混入一定的旧数据）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "9c65d332-0df7-473f-a482-87a287fb5645",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/root/autodl-tmp/yolo_incremental_learning/utils/general.py:32: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. Refrain from using this package or pin to Setuptools<81.\n",
      "  import pkg_resources as pkg\n",
      "\u001b[34m\u001b[1mtrain_final: \u001b[0mweights=./runs/train/Baseline/weights/best.pt, cfg=models/yolov5s_VOCKITTI.yaml, data=data/VOCKITTIBiC_base.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=1, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=DER, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=False, Lwf_lambda=[], Lwf_temperature=1.0, PODNet_enable=False, Distillation_layers=[17, 20, 23], POD_lambda=1.0, Old_models=[], DER_enable=True, DER_old_model=['./runs/train/Baseline/weights/last.pt']\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2903 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 314daedd Python-3.12.3 torch-2.5.1+cu124 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mComet: \u001b[0mrun 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo_final.Detect                [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "Model summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "extractors长度： 1\n",
      "首次创建 extractors\n",
      "成功拼接 extractors\n",
      "extractors共有模型个数： 2\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1    166935  models.yolo_final.Detect                [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [256, 512, 1024]]\n",
      "已知类别： 8\n",
      "YOLOv5s_VOCKITTI summary: 432 layers, 14279012 parameters, 7237807 gradients, 81.8 GFLOPs\n",
      "\n",
      "Transferred 342/1054 items from runs/train/Baseline/weights/best.pt\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 114 weight(decay=0.0), 123 weight(decay=0.0005), 123 bias\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/kitti_old.cache... 19692 im\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 images\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.23 anchors/target, 1.000 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/DER3/labels.jpg... \n",
      "/root/autodl-tmp/yolo_incremental_learning/train_final.py:407: FutureWarning: `torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead.\n",
      "  scaler = torch.cuda.amp.GradScaler(enabled=amp)\n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/DER3\u001b[0m\n",
      "Starting training for 1 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "        0/0      6.45G    0.06026    0.03592    0.04474         90        640: 1\n",
      "tensor([2.73285], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.659      0.685      0.705      0.391\n",
      "\n",
      "1 epochs completed in 0.103 hours.\n",
      "Optimizer stripped from runs/train/DER3/weights/last.pt, 29.1MB\n",
      "Optimizer stripped from runs/train/DER3/weights/best.pt, 29.1MB\n",
      "\n",
      "Validating runs/train/DER3/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 375 layers, 14269508 parameters, 0 gradients, 81.1 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032       0.66      0.685      0.705      0.391\n",
      "                   car       4952       1201       0.52      0.906      0.788       0.48\n",
      "                person       4952       4528      0.534      0.903      0.807      0.444\n",
      "             aeroplane       4952        285      0.715       0.73       0.72      0.326\n",
      "               bicycle       4952        337      0.737      0.807      0.836       0.49\n",
      "                  bird       4952        459      0.447      0.736      0.662      0.343\n",
      "                  boat       4952        263      0.624      0.443      0.518      0.248\n",
      "                bottle       4952        469      0.393        0.8      0.585      0.273\n",
      "                   bus       4952        213      0.861      0.695      0.832      0.535\n",
      "                   cat       4952        358      0.655      0.821      0.746      0.438\n",
      "                 chair       4952        756      0.472      0.701      0.633      0.346\n",
      "                   cow       4952        244      0.875      0.315      0.679      0.387\n",
      "           diningtable       4952        206      0.796      0.302       0.49      0.269\n",
      "                   dog       4952        489      0.712      0.732       0.77      0.443\n",
      "                 horse       4952        348      0.806      0.835      0.882        0.5\n",
      "             motorbike       4952        325      0.827      0.628       0.76      0.405\n",
      "           pottedplant       4952        480      0.351      0.585      0.422      0.176\n",
      "                 sheep       4952        242      0.713      0.682      0.747      0.451\n",
      "                  sofa       4952        239      0.785      0.519      0.648        0.4\n",
      "                 train       4952        282      0.823      0.741      0.814      0.464\n",
      "             tvmonitor       4952        308      0.551      0.815      0.757      0.402\n",
      "Results saved to \u001b[1mruns/train/DER3\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "# Incremental training with DER: the run log above shows a second feature\n",
    "# extractor being created and concatenated onto the old (Baseline) model\n",
    "# ('extractors' count goes from 1 to 2), i.e. the network is expanded\n",
    "# rather than only fine-tuned.  Trains on the replay-mixed VOC+KITTI data.\n",
    "# NOTE(review): with exist_ok=False the actual run directory gets\n",
    "# auto-suffixed — the saved log went to runs/train/DER3, not runs/train/DER.\n",
    "model = 'DER'\n",
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_final.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_VOCKITTI.yaml \\\n",
    "--data data/VOCKITTIBiC_base.yaml \\\n",
    "--epochs 1 \\\n",
    "--weights ./runs/train/Baseline/weights/best.pt \\\n",
    "--DER_enable \\\n",
    "--DER_old_model \\\n",
    "   ./runs/train/Baseline/weights/last.pt \\\n",
    "--name {model} \\\n",
    "\"\"\"\n",
    "!{command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "fa701e0f-c8f6-43ec-a268-9d86dc983ccc",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/root/autodl-tmp/yolo_incremental_learning/utils/general.py:32: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. Refrain from using this package or pin to Setuptools<81.\n",
      "  import pkg_resources as pkg\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti_increment.yaml, weights=['./runs/train/DER3/weights/best.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 314daedd Python-3.12.3 torch-2.5.1+cu124 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 375 layers, 14269508 parameters, 0 gradients, 81.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2245 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2245      11978      0.888       0.14       0.15     0.0696\n",
      "                   car       2245       8402      0.565      0.678      0.617      0.315\n",
      "                   van       2245        871          1          0     0.0468     0.0224\n",
      "                 truck       2245        351          1          0     0.0366     0.0226\n",
      "                  tram       2245        156          1          0    0.00431     0.0023\n",
      "                person       2245       1347       0.54      0.442      0.421      0.172\n",
      "        person_sitting       2245         91          1          0   0.000218   0.000175\n",
      "               cyclist       2245        468          1          0     0.0624     0.0192\n",
      "                  misc       2245        292          1          0    0.00707    0.00336\n",
      "Speed: 0.0ms pre-process, 1.3ms inference, 1.1ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp30\u001b[0m\n",
      "KITTI val successfully!\n"
     ]
    }
   ],
   "source": [
    "# DER: evaluate the DER model on the KITTI increment test split.\n",
    "# NOTE(review): builds the weights path from `model` ('DER', set by the\n",
    "# training cell above), but the saved log actually loaded\n",
    "# runs/train/DER3/weights/best.pt (auto-suffixed run name) — confirm which\n",
    "# run directory holds the intended weights before re-running.\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti_increment.yaml \\\n",
    "--weights ./runs/train/{model}/weights/best.pt \\\n",
    "--task test &&\\\n",
    "echo 'KITTI val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c5a5cc41-69e3-4cab-90da-bb3ab4e11d45",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "9ecaf537-38bd-4e41-bcb4-410f1ceae603",
   "metadata": {},
   "source": [
    "(7)、使用POD增量训练（这里需要在新数据集中混入一定的旧数据）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6c195aa4-c60e-44ac-aa21-d6904f5abd6e",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/root/autodl-tmp/yolo_incremental_learning/utils/general.py:32: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. Refrain from using this package or pin to Setuptools<81.\n",
      "  import pkg_resources as pkg\n",
      "\u001b[34m\u001b[1mtrain_final: \u001b[0mweights=./runs/train/Baseline/weights/best.pt, cfg=models/yolov5s_VOCKITTI.yaml, data=data/VOCKITTIBiC_base.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=15, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=POD, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=False, Lwf_lambda=[], Lwf_temperature=1.0, PODNet_enable=True, Distillation_layers=[1, 3, 5, 7, 9, 13, 17, 20, 23], POD_lambda=100.0, Old_models=['./runs/train/Baseline/weights/last.pt'], DER_enable=False, DER_old_model=[]\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2903 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 314daedd Python-3.12.3 torch-2.5.1+cu124 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mComet: \u001b[0mrun 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "extractors长度： 0\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83607  models.yolo_final.Detect                [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7089751 parameters, 7089751 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/349 items from runs/train/Baseline/weights/best.pt\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/kitti_old.cache... 19692 im\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 images\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.23 anchors/target, 1.000 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/POD3/labels.jpg... \n",
      "/root/autodl-tmp/yolo_incremental_learning/train_final.py:407: FutureWarning: `torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead.\n",
      "  scaler = torch.cuda.amp.GradScaler(enabled=amp)\n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/POD3\u001b[0m\n",
      "Starting training for 15 epochs...\n",
      "Overriding model.yaml nc=26 with nc=8\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo_final.Detect                [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/14      4.16G    0.05997    0.03701    0.04659         90        640: 1\n",
      "tensor([1.48435], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00075, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.702      0.665      0.716      0.408\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/14      6.36G    0.05024    0.03158    0.02797        104        640:  "
     ]
    }
   ],
   "source": [
    "# Incremental training with PODNet-style feature distillation: distill the\n",
    "# feature maps of the listed model layers from the old (Baseline) model\n",
    "# while fine-tuning on the replay-mixed VOC+KITTI dataset.\n",
    "# NOTE(review): the saved output below was produced with --epochs 15 and\n",
    "# run directory runs/train/POD3; the cell as written now runs 1 epoch.\n",
    "pod_layers = '1 3 5 7 9 13 17 20 23'  # layer indices passed to --Distillation_layers\n",
    "model = 'POD'\n",
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_final.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_VOCKITTI.yaml \\\n",
    "--data data/VOCKITTIBiC_base.yaml \\\n",
    "--epochs 1 \\\n",
    "--weights ./runs/train/Baseline/weights/best.pt \\\n",
    "--PODNet_enable \\\n",
    "--Distillation_layers {pod_layers} \\\n",
    "--POD_lambda 1e2 \\\n",
    "--Old_models \\\n",
    "   ./runs/train/Baseline/weights/last.pt \\\n",
    "--name {model} \\\n",
    "\"\"\"\n",
    "!{command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b9759d94-b5ca-492e-8a98-437392075ab8",
   "metadata": {},
   "outputs": [],
   "source": [
    "# POD: evaluate the POD-distilled model on the KITTI increment test split.\n",
    "# NOTE: depends on `model` ('POD') from the training cell above; if the run\n",
    "# name was auto-suffixed (exist_ok=False), adjust the weights path to the\n",
    "# actual run directory.\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti_increment.yaml \\\n",
    "--weights ./runs/train/{model}/weights/best.pt \\\n",
    "--task test &&\\\n",
    "echo 'KITTI val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "956a6e21-6794-4312-8dc8-efa35158e190",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
