{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "cc1b0164-0702-4008-a01e-51fa2878fe5a",
   "metadata": {},
   "source": [
    "# 第一轮训练基础模型\n",
    "\n",
    "因为这只是测试整理完代码后还能不能正常跑，所以较少的epoch就够了"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "3978fcae-e916-49f8-aced-f109d42f4e8a",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/root/autodl-tmp/yolo_incremental_learning/utils/general.py:32: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. Refrain from using this package or pin to Setuptools<81.\n",
      "  import pkg_resources as pkg\n",
      "\u001b[34m\u001b[1mtrain_final: \u001b[0mweights=yolov5s.pt, cfg=models/yolov5s_kitti.yaml, data=data/kitti.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=30, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=baseline, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=True, SI_pt=None, SI_lambda=10.0, Lwf_enable=False, Lwf_lambda=[], Lwf_temperature=1.0, PODNet_enable=False, Distillation_layers=[17, 20, 23], POD_lambda=1.0, Old_models=[], DER_enable=False, DER_old_model=[]\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2903 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 1fa09d14 Python-3.12.3 torch-2.5.1+cu124 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mComet: \u001b[0mrun 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "extractors长度： 0\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo_final.Detect                [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_kitti summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "Transferred 342/349 items from yolov5s.pt\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/train.cache... 4189 image\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/val.cache... 1047 images, 0\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.81 anchors/target, 0.999 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/baseline/labels.jpg... \n",
      "/root/autodl-tmp/yolo_incremental_learning/train_final.py:407: FutureWarning: `torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead.\n",
      "  scaler = torch.cuda.amp.GradScaler(enabled=amp)\n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/baseline\u001b[0m\n",
      "Starting training for 30 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/29      3.65G    0.08043    0.04745    0.03265         85        640: 1\n",
      "tensor([1.67814], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1047       5769      0.736      0.127      0.117     0.0414\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/29      3.65G    0.06457    0.03968    0.02276        150        640: 1\n",
      "tensor([1.57506], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1047       5769      0.325      0.316      0.226      0.102\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/29      3.65G    0.05943    0.03867    0.01962        140        640: 1\n",
      "tensor([1.38143], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1047       5769      0.528      0.362      0.327      0.142\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/29      3.65G    0.05272     0.0378    0.01627        121        640: 1\n",
      "tensor([1.29820], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1047       5769      0.667      0.375      0.409      0.201\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/29      3.65G    0.04852    0.03658    0.01431        151        640: 1\n",
      "tensor([1.23307], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1047       5769      0.718      0.468      0.484      0.236\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/29      3.65G    0.04542    0.03609    0.01248        138        640: 1\n",
      "tensor([1.17160], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1047       5769      0.776      0.491      0.546      0.289\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/29      3.65G    0.04372    0.03508    0.01129        119        640: 1\n",
      "tensor([0.95667], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1047       5769      0.625       0.59      0.609      0.307\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/29      3.65G    0.04224    0.03487    0.01032        168        640: 1\n",
      "tensor([1.16986], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1047       5769      0.857      0.536      0.638      0.356\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/29      3.65G    0.04094    0.03417   0.009475        217        640: 1\n",
      "tensor([1.35510], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1047       5769      0.677      0.622      0.669      0.375\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/29      3.65G    0.04002    0.03403   0.008806        125        640: 1\n",
      "tensor([1.07713], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1047       5769      0.701      0.667      0.715      0.397\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/29      3.65G    0.03926    0.03327   0.007999        188        640: 1\n",
      "tensor([1.22432], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1047       5769      0.683      0.662       0.72      0.411\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/29      3.65G    0.03838    0.03302     0.0076        202        640: 1\n",
      "tensor([1.15289], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1047       5769      0.754       0.67      0.729      0.412\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/29      3.65G    0.03766     0.0327   0.007072        168        640: 1\n",
      "tensor([1.03640], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1047       5769      0.764      0.729      0.778      0.462\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/29      3.65G    0.03669    0.03183   0.006461         84        640: 1\n",
      "tensor([0.81233], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1047       5769      0.815      0.731      0.796      0.477\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/29      3.65G    0.03579     0.0313   0.006129        142        640: 1\n",
      "tensor([0.86386], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1047       5769      0.777       0.73      0.792      0.482\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/29      3.65G    0.03532    0.03135   0.005941         81        640: 1\n",
      "tensor([0.67838], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1047       5769      0.809      0.746      0.801      0.486\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/29      3.65G    0.03472    0.03096   0.005745        127        640: 1\n",
      "tensor([0.82479], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1047       5769      0.764      0.766      0.797      0.491\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/29      3.65G    0.03415    0.03044    0.00525        134        640: 1\n",
      "tensor([0.90845], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1047       5769       0.81      0.755      0.811      0.496\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/29      3.65G     0.0333    0.02995   0.005061        108        640: 1\n",
      "tensor([0.78063], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1047       5769      0.866      0.751      0.826      0.516\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/29      3.65G    0.03295     0.0302   0.004957        114        640: 1\n",
      "tensor([0.77887], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1047       5769      0.817       0.79      0.837      0.514\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/29      3.65G    0.03237    0.02918   0.004628        112        640: 1\n",
      "tensor([0.76060], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1047       5769      0.846      0.772      0.846      0.539\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/29      3.65G    0.03193    0.02924    0.00462        194        640: 1\n",
      "tensor([0.99254], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1047       5769      0.843      0.799      0.844      0.531\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/29      3.65G    0.03156    0.02943   0.004408        174        640: 1\n",
      "tensor([0.87278], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1047       5769      0.873      0.778      0.846      0.541\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/29      3.65G    0.03098    0.02851   0.004065        134        640: 1\n",
      "tensor([0.71552], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1047       5769      0.827      0.791      0.851       0.55\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/29      3.65G    0.03048    0.02802   0.003881        128        640: 1\n",
      "tensor([0.71559], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1047       5769       0.88      0.764      0.857      0.563\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/29      3.65G    0.03031    0.02851   0.003796        181        640: 1\n",
      "tensor([0.90298], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1047       5769      0.868      0.788      0.855      0.561\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/29      3.65G       0.03     0.0283   0.003863        129        640: 1\n",
      "tensor([0.75204], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1047       5769      0.897      0.774      0.861      0.564\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/29      3.65G    0.02955    0.02724   0.003607        172        640: 1\n",
      "tensor([0.94415], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1047       5769      0.874      0.779      0.857      0.573\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/29      3.65G    0.02938    0.02766   0.003599        153        640: 1\n",
      "tensor([0.85798], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1047       5769      0.885      0.779      0.861      0.571\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/29      3.65G    0.02916    0.02727   0.003515        128        640: 1\n",
      "tensor([0.71388], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1047       5769      0.858      0.794      0.854      0.582\n",
      "\n",
      "30 epochs completed in 0.265 hours.\n",
      "Optimizer stripped from runs/train/baseline/weights/last.pt, 14.3MB\n",
      "Optimizer stripped from runs/train/baseline/weights/best.pt, 14.3MB\n",
      "\n",
      "Validating runs/train/baseline/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1047       5769       0.87      0.798      0.859      0.583\n",
      "                   Car       1047       4076      0.927      0.896      0.955      0.723\n",
      "                   Van       1047        423      0.917      0.867      0.927      0.691\n",
      "                 Truck       1047        154      0.897      0.935      0.951      0.736\n",
      "                  Tram       1047         70      0.918      0.971       0.96      0.697\n",
      "            Pedestrian       1047        666      0.876      0.692      0.809      0.439\n",
      "        Person_sitting       1047         23      0.685      0.379      0.518      0.287\n",
      "               Cyclist       1047        219      0.906      0.799      0.858      0.519\n",
      "                  Misc       1047        138      0.835      0.848      0.891      0.569\n",
      "Results saved to \u001b[1mruns/train/baseline\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "# Round 1: train the KITTI baseline model (quick sanity-check run, SI enabled).\n",
    "# Adjacent-string concatenation replaces the original triple-quoted string:\n",
    "# it avoids the fragile backslash line-continuations (including a trailing\n",
    "# one before the closing quotes), the stray leading newline in the command,\n",
    "# and the pointless f-string prefix on a string with no placeholders.\n",
    "command = (\n",
    "    \"env COMET_LOG_PER_CLASS_METRICS=true python train_final.py \"\n",
    "    \"--img 640 \"\n",
    "    \"--bbox_interval 1 \"\n",
    "    \"--cfg models/yolov5s_kitti.yaml \"\n",
    "    \"--data data/kitti.yaml \"\n",
    "    \"--epochs 30 \"\n",
    "    \"--name baseline \"\n",
    "    \"--SI_enable\"\n",
    ")\n",
    "!{command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0e53cadd-d3b7-4491-88a9-f78c99ac8084",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d912aee3-5b6f-49cc-9906-761335eb9e5c",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "ae6351dc-421b-4631-8f31-bff14ba05bcb",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/root/autodl-tmp/yolo_incremental_learning/utils/general.py:32: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. Refrain from using this package or pin to Setuptools<81.\n",
      "  import pkg_resources as pkg\n",
      "\u001b[34m\u001b[1mtrain_final: \u001b[0mweights=./runs/train/baseline/weights/best.pt, cfg=models/yolov5s_VOCKITTI.yaml, data=data/VOCKITTI.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=15, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=n_r_baseline, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=False, Lwf_lambda=[], Lwf_temperature=1.0, PODNet_enable=False, Distillation_layers=[17, 20, 23], POD_lambda=1.0, Old_models=[], DER_enable=False, DER_old_model=[]\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2903 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 1fa09d14 Python-3.12.3 torch-2.5.1+cu124 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mComet: \u001b[0mrun 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "extractors长度： 0\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83607  models.yolo_final.Detect                [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7089751 parameters, 7089751 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/349 items from runs/train/baseline/weights/best.pt\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/train2007.cache... 16551 im\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 images\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m3.99 anchors/target, 1.000 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/n_r_baseline3/labels.jpg... \n",
      "/root/autodl-tmp/yolo_incremental_learning/train_final.py:407: FutureWarning: `torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead.\n",
      "  scaler = torch.cuda.amp.GradScaler(enabled=amp)\n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/n_r_baseline3\u001b[0m\n",
      "Starting training for 15 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/14      3.47G    0.06226    0.04069    0.06142         36        640: 1\n",
      "tensor([0.86524], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.308      0.344        0.3      0.158\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/14      5.67G    0.04743    0.03296    0.03181         58        640: 1\n",
      "tensor([1.13971], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.659      0.596      0.648      0.359\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/14      5.67G    0.04561    0.03365    0.02375         37        640: 1\n",
      "tensor([0.67789], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.615      0.554      0.581      0.312\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/14      5.67G     0.0442    0.03482    0.02358         46        640: 1\n",
      "tensor([0.76971], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.626      0.575      0.619      0.351\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/14      5.67G    0.04324    0.03484    0.02246         39        640: 1\n",
      "tensor([0.79339], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.676      0.581      0.635      0.356\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/14      5.67G    0.04146    0.03413    0.02033         28        640: 1\n",
      "tensor([0.60268], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.675      0.629      0.682      0.405\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/14      5.67G    0.04017    0.03373    0.01865         39        640: 1\n",
      "tensor([0.65691], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.684      0.652      0.697       0.42\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/14      5.67G    0.03892    0.03298    0.01734         34        640: 1\n",
      "tensor([0.60494], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.714      0.664      0.715      0.436\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/14      5.67G    0.03788    0.03236    0.01616         41        640: 1\n",
      "tensor([0.54293], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.723      0.676      0.735      0.458\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/14      5.67G    0.03654    0.03127    0.01489         46        640: 1\n",
      "tensor([0.56140], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.732      0.703      0.759      0.484\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/14      5.67G    0.03579    0.03126    0.01388         30        640: 1\n",
      "tensor([0.51129], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.739      0.711       0.77      0.499\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/14      5.67G    0.03461    0.03092    0.01292         26        640: 1\n",
      "tensor([0.44112], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032       0.75      0.731       0.78      0.514\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/14      5.67G    0.03353    0.03023    0.01174         33        640: 1\n",
      "tensor([0.56133], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.748      0.747      0.792      0.527\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/14      5.67G    0.03271    0.02973    0.01096         30        640: 1\n",
      "tensor([0.47493], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.769       0.74      0.798      0.539\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/14      5.67G    0.03197    0.02911    0.01021         33        640: 1\n",
      "tensor([0.54383], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.773      0.748      0.807      0.551\n",
      "\n",
      "15 epochs completed in 0.545 hours.\n",
      "Optimizer stripped from runs/train/n_r_baseline3/weights/last.pt, 14.5MB\n",
      "Optimizer stripped from runs/train/n_r_baseline3/weights/best.pt, 14.5MB\n",
      "\n",
      "Validating runs/train/n_r_baseline3/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.773      0.747      0.807      0.551\n",
      "                   car       4952       1201      0.817      0.882      0.916      0.691\n",
      "                person       4952       4528      0.847      0.796      0.884      0.577\n",
      "             aeroplane       4952        285      0.907      0.786      0.867      0.558\n",
      "               bicycle       4952        337      0.875      0.792      0.884      0.612\n",
      "                  bird       4952        459      0.789      0.708      0.781      0.492\n",
      "                  boat       4952        263      0.686      0.605      0.687      0.401\n",
      "                bottle       4952        469      0.656      0.742      0.757      0.505\n",
      "                   bus       4952        213      0.824      0.826      0.889      0.733\n",
      "                   cat       4952        358       0.85      0.737      0.837      0.591\n",
      "                 chair       4952        756      0.591       0.63      0.646       0.42\n",
      "                   cow       4952        244      0.756      0.814      0.852      0.608\n",
      "           diningtable       4952        206      0.705      0.723      0.756      0.509\n",
      "                   dog       4952        489      0.796      0.688       0.82      0.551\n",
      "                 horse       4952        348      0.875      0.853        0.9      0.615\n",
      "             motorbike       4952        325      0.856      0.769      0.867      0.552\n",
      "           pottedplant       4952        480      0.641      0.535      0.577      0.327\n",
      "                 sheep       4952        242      0.706      0.814      0.823      0.593\n",
      "                  sofa       4952        239      0.701      0.669      0.738       0.54\n",
      "                 train       4952        282       0.84      0.781      0.859      0.568\n",
      "             tvmonitor       4952        308      0.749      0.793      0.807      0.582\n",
      "Results saved to \u001b[1mruns/train/n_r_baseline3\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "# Fine-tune the baseline model on VOC+KITTI for 15 epochs (no regularization flags).\n",
    "# Plain triple-quoted string: it contains no {…} placeholders, so the former\n",
    "# f-prefix was unnecessary (and risky if literal braces are ever added).\n",
    "command = \"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_final.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_VOCKITTI.yaml \\\n",
    "--data data/VOCKITTI.yaml \\\n",
    "--epochs 15 \\\n",
    "--weights ./runs/train/baseline/weights/best.pt \\\n",
    "--name n_r_baseline \\\n",
    "\"\"\"\n",
    "!{command}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "46d71d1b-8c69-44c3-87a4-43e02282e79c",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "dc7bdd79-39f3-4c98-ab61-d7ff26136156",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "d6402fd3-d1b2-423c-82cf-b9f8791ae331",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/root/autodl-tmp/yolo_incremental_learning/utils/general.py:32: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. Refrain from using this package or pin to Setuptools<81.\n",
      "  import pkg_resources as pkg\n",
      "\u001b[34m\u001b[1mtrain_final: \u001b[0mweights=./runs/train/baseline/weights/best.pt, cfg=models/yolov5s_VOCKITTI.yaml, data=data/VOCKITTI.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=15, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=n_r_Lwf, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=True, Lwf_lambda=[0.0001], Lwf_temperature=1.0, PODNet_enable=False, Distillation_layers=[17, 20, 23], POD_lambda=1.0, Old_models=['./runs/train/baseline/weights/last.pt'], DER_enable=False, DER_old_model=[]\n",
      "Command 'git fetch ultralytics' timed out after 5 seconds\n",
      "YOLOv5 🚀 1fa09d14 Python-3.12.3 torch-2.5.1+cu124 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mComet: \u001b[0mrun 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "extractors长度： 0\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83607  models.yolo_final.Detect                [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7089751 parameters, 7089751 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/349 items from runs/train/baseline/weights/best.pt\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/train2007.cache... 16551 im\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 images\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m3.99 anchors/target, 1.000 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/n_r_Lwf6/labels.jpg... \n",
      "/root/autodl-tmp/yolo_incremental_learning/train_final.py:407: FutureWarning: `torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead.\n",
      "  scaler = torch.cuda.amp.GradScaler(enabled=amp)\n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/n_r_Lwf6\u001b[0m\n",
      "Starting training for 15 epochs...\n",
      "Overriding model.yaml nc=26 with nc=8\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo_final.Detect                [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/14      3.72G    0.06489    0.04248    0.06214         36        640: 1\n",
      "tensor([1.28545], device='cuda:0', grad_fn=<AddBackward0>) tensor(3092.66382, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032       0.47      0.238      0.209      0.102\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/14      5.93G    0.04882    0.03537    0.04002         58        640: 1\n",
      "tensor([1.58395], device='cuda:0', grad_fn=<AddBackward0>) tensor(4155.32520, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.566      0.518      0.534      0.288\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/14      5.93G     0.0463    0.03444    0.02839         37        640: 1\n",
      "tensor([1.32260], device='cuda:0', grad_fn=<AddBackward0>) tensor(5858.12207, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.602      0.583      0.612      0.333\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/14      5.93G    0.04382    0.03425    0.02511         46        640: 1\n",
      "tensor([1.38542], device='cuda:0', grad_fn=<AddBackward0>) tensor(6532.39258, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.651      0.612       0.66      0.378\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/14      5.93G    0.04249    0.03392    0.02295         39        640: 1\n",
      "tensor([1.42753], device='cuda:0', grad_fn=<AddBackward0>) tensor(6771.88184, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.678      0.614      0.665      0.383\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/14      5.93G    0.04077    0.03346    0.02101         28        640: 1\n",
      "tensor([1.25253], device='cuda:0', grad_fn=<AddBackward0>) tensor(6458.60693, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.675      0.652       0.69      0.414\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/14      5.93G    0.03956     0.0331    0.01955         39        640: 1\n",
      "tensor([1.36121], device='cuda:0', grad_fn=<AddBackward0>) tensor(7078.32471, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.701      0.649      0.703      0.424\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/14      5.93G    0.03849    0.03246    0.01861         34        640: 1\n",
      "tensor([1.33189], device='cuda:0', grad_fn=<AddBackward0>) tensor(7251.46094, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.705      0.672      0.722      0.441\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/14      5.93G     0.0375    0.03201    0.01753         41        640: 1\n",
      "tensor([1.14247], device='cuda:0', grad_fn=<AddBackward0>) tensor(5905.59863, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.709      0.685      0.734      0.456\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/14      5.93G    0.03645    0.03104    0.01629         46        640: 1\n",
      "tensor([1.19627], device='cuda:0', grad_fn=<AddBackward0>) tensor(5941.54639, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.732      0.692      0.752      0.472\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/14      5.93G    0.03567    0.03104     0.0155         30        640: 1\n",
      "tensor([1.17189], device='cuda:0', grad_fn=<AddBackward0>) tensor(5340.54297, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.734      0.703      0.756       0.48\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/14      5.93G    0.03469    0.03093    0.01475         26        640: 1\n",
      "tensor([1.05350], device='cuda:0', grad_fn=<AddBackward0>) tensor(6033.09766, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.729      0.717      0.762      0.493\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/14      5.93G    0.03371    0.03037    0.01381         33        640: 1\n",
      "tensor([1.13366], device='cuda:0', grad_fn=<AddBackward0>) tensor(5860.95459, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.741      0.714       0.77      0.503\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/14      5.93G    0.03296    0.02999    0.01315         30        640: 1\n",
      "tensor([1.00249], device='cuda:0', grad_fn=<AddBackward0>) tensor(5357.32617, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032       0.75      0.726      0.778      0.513\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/14      5.93G    0.03233    0.02948    0.01243         33        640: 1\n",
      "tensor([1.08733], device='cuda:0', grad_fn=<AddBackward0>) tensor(5407.89111, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.746      0.732      0.783       0.52\n",
      "\n",
      "15 epochs completed in 0.554 hours.\n",
      "Optimizer stripped from runs/train/n_r_Lwf6/weights/last.pt, 14.5MB\n",
      "Optimizer stripped from runs/train/n_r_Lwf6/weights/best.pt, 14.5MB\n",
      "\n",
      "Validating runs/train/n_r_Lwf6/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.745      0.733      0.783       0.52\n",
      "                   car       4952       1201      0.786      0.877      0.903      0.668\n",
      "                person       4952       4528      0.817      0.803      0.869      0.567\n",
      "             aeroplane       4952        285      0.858      0.782      0.849      0.531\n",
      "               bicycle       4952        337      0.862       0.78       0.87      0.581\n",
      "                  bird       4952        459      0.736      0.669      0.705      0.437\n",
      "                  boat       4952        263      0.603       0.62      0.646      0.375\n",
      "                bottle       4952        469      0.654      0.725       0.73      0.479\n",
      "                   bus       4952        213        0.8      0.843      0.854      0.687\n",
      "                   cat       4952        358      0.828      0.687      0.804      0.536\n",
      "                 chair       4952        756      0.562      0.614      0.621      0.391\n",
      "                   cow       4952        244      0.688      0.783       0.81      0.562\n",
      "           diningtable       4952        206      0.735      0.686       0.76      0.483\n",
      "                   dog       4952        489      0.762      0.683      0.784      0.503\n",
      "                 horse       4952        348      0.876       0.81      0.887      0.599\n",
      "             motorbike       4952        325      0.821       0.76      0.849      0.538\n",
      "           pottedplant       4952        480      0.571      0.546      0.552      0.289\n",
      "                 sheep       4952        242      0.672      0.822      0.829      0.576\n",
      "                  sofa       4952        239      0.657      0.653      0.713      0.509\n",
      "                 train       4952        282       0.87      0.755      0.848      0.549\n",
      "             tvmonitor       4952        308      0.745       0.76      0.782      0.544\n",
      "Results saved to \u001b[1mruns/train/n_r_Lwf6\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "# LwF run: fine-tune on VOC+KITTI with the --Lwf_* distillation flags enabled,\n",
    "# using the frozen baseline checkpoint (--Old_models) as the teacher.\n",
    "# Plain triple-quoted string: no {…} placeholders, so no f-prefix is needed.\n",
    "command = \"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_final.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_VOCKITTI.yaml \\\n",
    "--data data/VOCKITTI.yaml \\\n",
    "--epochs 15 \\\n",
    "--weights ./runs/train/baseline/weights/best.pt \\\n",
    "--Lwf_enable \\\n",
    "--Lwf_temperature 1.0 \\\n",
    "--Lwf_lambda 1e-4 \\\n",
    "--Old_models \\\n",
    "    ./runs/train/baseline/weights/last.pt \\\n",
    "--name n_r_Lwf \\\n",
    "\"\"\"\n",
    "!{command}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "c3766d41-06fb-4c49-80df-3b23830298df",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/root/autodl-tmp/yolo_incremental_learning/utils/general.py:32: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. Refrain from using this package or pin to Setuptools<81.\n",
      "  import pkg_resources as pkg\n",
      "\u001b[34m\u001b[1mtrain_final: \u001b[0mweights=./runs/train/baseline/weights/best.pt, cfg=models/yolov5s_VOCKITTI.yaml, data=data/VOCKITTI.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=15, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=n_r_Lwf, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=True, Lwf_lambda=[0.001], Lwf_temperature=1.0, PODNet_enable=False, Distillation_layers=[17, 20, 23], POD_lambda=1.0, Old_models=['./runs/train/baseline/weights/last.pt'], DER_enable=False, DER_old_model=[]\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2903 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 1fa09d14 Python-3.12.3 torch-2.5.1+cu124 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mComet: \u001b[0mrun 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "extractors长度： 0\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83607  models.yolo_final.Detect                [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7089751 parameters, 7089751 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/349 items from runs/train/baseline/weights/best.pt\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/train2007.cache... 16551 im\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 images\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m3.99 anchors/target, 1.000 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/n_r_Lwf7/labels.jpg... \n",
      "/root/autodl-tmp/yolo_incremental_learning/train_final.py:407: FutureWarning: `torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead.\n",
      "  scaler = torch.cuda.amp.GradScaler(enabled=amp)\n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/n_r_Lwf7\u001b[0m\n",
      "Starting training for 15 epochs...\n",
      "Overriding model.yaml nc=26 with nc=8\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo_final.Detect                [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/14      3.72G    0.07402    0.04575    0.07005         36        640: 1\n",
      "tensor([2.49854], device='cuda:0', grad_fn=<AddBackward0>) tensor(1341.40991, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.426     0.0737     0.0632     0.0264\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/14      5.94G    0.05696    0.04122    0.06211         58        640: 1\n",
      "tensor([3.00847], device='cuda:0', grad_fn=<AddBackward0>) tensor(1694.52527, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032       0.42      0.164      0.143     0.0669\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/14      5.94G    0.05195    0.03998     0.0556         37        640: 1\n",
      "tensor([3.58220], device='cuda:0', grad_fn=<AddBackward0>) tensor(2641.98291, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.351      0.298      0.249       0.12\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/14      5.94G    0.04955    0.03872    0.04924         46        640: 1\n",
      "tensor([3.74187], device='cuda:0', grad_fn=<AddBackward0>) tensor(2755.88892, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.404      0.363      0.327      0.158\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/14      5.94G     0.0481    0.03821    0.04422         39        640: 1\n",
      "tensor([3.69774], device='cuda:0', grad_fn=<AddBackward0>) tensor(2709.30078, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.463      0.419      0.404      0.199\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/14      5.94G    0.04668    0.03788     0.0406         28        640: 1\n",
      "tensor([3.49217], device='cuda:0', grad_fn=<AddBackward0>) tensor(2673.36011, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.487      0.432       0.43      0.219\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/14      5.94G    0.04591    0.03792    0.03842         39        640: 1\n",
      "tensor([3.52450], device='cuda:0', grad_fn=<AddBackward0>) tensor(2727.54932, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.505      0.466      0.457      0.237\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/14      5.94G    0.04523    0.03753    0.03764         34        640: 1\n",
      "tensor([3.42690], device='cuda:0', grad_fn=<AddBackward0>) tensor(2587.24902, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032       0.53      0.484      0.491      0.252\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/14      5.94G    0.04457    0.03744    0.03658         41        640: 1\n",
      "tensor([2.66999], device='cuda:0', grad_fn=<AddBackward0>) tensor(1981.14417, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.525       0.49      0.493      0.256\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/14      5.94G    0.04402    0.03671    0.03577         46        640: 1\n",
      "tensor([2.49186], device='cuda:0', grad_fn=<AddBackward0>) tensor(1661.71155, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.538      0.497      0.498      0.258\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/14      5.94G    0.04384    0.03718    0.03514         30        640: 1\n",
      "tensor([2.33157], device='cuda:0', grad_fn=<AddBackward0>) tensor(1527.54480, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.542      0.504      0.507      0.268\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/14      5.94G     0.0435    0.03762    0.03513         26        640: 1\n",
      "tensor([2.22683], device='cuda:0', grad_fn=<AddBackward0>) tensor(1531.50769, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.552      0.509      0.522      0.276\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/14      5.94G    0.04304     0.0374    0.03474         33        640: 1\n",
      "tensor([2.05201], device='cuda:0', grad_fn=<AddBackward0>) tensor(1291.23792, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.542      0.496      0.511      0.272\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/14      5.94G    0.04303    0.03735    0.03465         30        640: 1\n",
      "tensor([1.80786], device='cuda:0', grad_fn=<AddBackward0>) tensor(1025.56482, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.543      0.518       0.52      0.277\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/14      5.94G    0.04297    0.03726    0.03438         33        640: 1\n",
      "tensor([1.67803], device='cuda:0', grad_fn=<AddBackward0>) tensor(857.51239, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.557      0.507      0.521      0.279\n",
      "\n",
      "15 epochs completed in 0.557 hours.\n",
      "Optimizer stripped from runs/train/n_r_Lwf7/weights/last.pt, 14.5MB\n",
      "Optimizer stripped from runs/train/n_r_Lwf7/weights/best.pt, 14.5MB\n",
      "\n",
      "Validating runs/train/n_r_Lwf7/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.557      0.507      0.521       0.28\n",
      "                   car       4952       1201      0.694      0.718      0.751      0.497\n",
      "                person       4952       4528      0.709       0.66      0.714      0.386\n",
      "             aeroplane       4952        285       0.54      0.568      0.544      0.254\n",
      "               bicycle       4952        337      0.694      0.565      0.643      0.343\n",
      "                  bird       4952        459      0.484      0.379      0.397      0.197\n",
      "                  boat       4952        263      0.406      0.403      0.336       0.14\n",
      "                bottle       4952        469      0.465      0.503      0.478      0.263\n",
      "                   bus       4952        213      0.555      0.577      0.608      0.408\n",
      "                   cat       4952        358      0.679      0.363      0.512      0.236\n",
      "                 chair       4952        756      0.422      0.386      0.375      0.196\n",
      "                   cow       4952        244      0.547      0.533       0.51      0.308\n",
      "           diningtable       4952        206      0.532      0.515      0.476      0.213\n",
      "                   dog       4952        489      0.603      0.366      0.465      0.229\n",
      "                 horse       4952        348      0.541      0.603      0.584      0.291\n",
      "             motorbike       4952        325      0.654      0.545      0.596      0.304\n",
      "           pottedplant       4952        480      0.435      0.327      0.303       0.12\n",
      "                 sheep       4952        242      0.438      0.591      0.572      0.362\n",
      "                  sofa       4952        239      0.577      0.427      0.458      0.258\n",
      "                 train       4952        282      0.612      0.532       0.56      0.264\n",
      "             tvmonitor       4952        308      0.547      0.568      0.546       0.32\n",
      "Results saved to \u001b[1mruns/train/n_r_Lwf7\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "# Same LwF incremental run but with a stronger distillation weight\n",
    "# (--Lwf_lambda 1e-3 instead of 1e-4); all other settings unchanged.\n",
    "# NOTE: inside the triple-quoted string each trailing backslash-newline is a\n",
    "# line continuation, so `command` collapses to a single shell command line.\n",
    "# (Plain string, not an f-string: there are no {} placeholders to interpolate.)\n",
    "command = \"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_final.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_VOCKITTI.yaml \\\n",
    "--data data/VOCKITTI.yaml \\\n",
    "--epochs 15 \\\n",
    "--weights ./runs/train/baseline/weights/best.pt \\\n",
    "--Lwf_enable \\\n",
    "--Lwf_temperature 1.0 \\\n",
    "--Lwf_lambda 1e-3 \\\n",
    "--Old_models \\\n",
    "    ./runs/train/baseline/weights/last.pt \\\n",
    "--name n_r_Lwf \\\n",
    "\"\"\"\n",
    "!{command}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0f558f47-ed7c-44fe-abd9-a5b324a95f83",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7468f2f9-2590-4e97-a664-c4a9da386f17",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "4cc02cf3-ad07-4411-8350-8afb34fa60ab",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/root/autodl-tmp/yolo_incremental_learning/utils/general.py:32: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. Refrain from using this package or pin to Setuptools<81.\n",
      "  import pkg_resources as pkg\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo_final.Detect                [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_kitti summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "/root/autodl-tmp/yolo_incremental_learning/utils/autobatch.py:15: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with torch.cuda.amp.autocast(amp):\n",
      "\u001b[34m\u001b[1mAutoBatch: \u001b[0mComputing optimal batch size for --imgsz 640\n",
      "\u001b[34m\u001b[1mAutoBatch: \u001b[0mCUDA:0 (NVIDIA GeForce RTX 3090) 23.57G total, 0.10G reserved, 0.05G allocated, 23.42G free\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "      Params      GFLOPs  GPU_mem (GB)  forward (ms) backward (ms)                   input                  output\n",
      "     7041205       16.01         0.258         12.79         40.29        (1, 3, 640, 640)                    list\n",
      "     7041205       32.01         0.445          9.97         18.06        (2, 3, 640, 640)                    list\n",
      "     7041205       64.02         0.879          9.88         19.27        (4, 3, 640, 640)                    list\n",
      "     7041205         128         1.567          12.4         24.73        (8, 3, 640, 640)                    list\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mAutoBatch: \u001b[0mUsing batch-size 97 for CUDA:0 18.88G/23.57G (80%) ✅\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "     7041205       256.1         3.158         21.15         34.61       (16, 3, 640, 640)                    list\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning ../datasets/kitti/labels/train... 4189 images, 0 backgrounds, 0 corrupt: 100%|██████████| 4189/4189 [00:00<00:00, 7451.23it/s]\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: ../datasets/kitti/labels/train.cache\n",
      "/root/autodl-tmp/yolo_incremental_learning/EWC_module/fisher.py:160: FutureWarning: `torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead.\n",
      "  scaler = torch.cuda.amp.GradScaler(enabled=amp)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "fisher context saved at runs/train/baseline/weights/fisher.pt\n"
     ]
    }
   ],
   "source": [
    "# Compute the Fisher information context of the baseline checkpoint for EWC.\n",
    "# Per the recorded output, cal_fisher saves the result as fisher.pt alongside\n",
    "# the given weights ('fisher context saved at runs/train/baseline/weights/fisher.pt'),\n",
    "# which later runs consume via --ewc_pt. Presumably a diagonal Fisher estimate\n",
    "# over the KITTI training set — confirm in EWC_module/fisher.py.\n",
    "from EWC_module.fisher import cal_fisher\n",
    "cal_fisher('./runs/train/baseline/weights/best.pt')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ec1ac054-c73d-43bb-8f46-909eced6ae0c",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/root/autodl-tmp/yolo_incremental_learning/utils/general.py:32: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. Refrain from using this package or pin to Setuptools<81.\n",
      "  import pkg_resources as pkg\n",
      "\u001b[34m\u001b[1mtrain_final: \u001b[0mweights=./runs/train/baseline/weights/best.pt, cfg=models/yolov5s_VOCKITTI.yaml, data=data/VOCKITTI.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=15, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=test2, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=runs/train/baseline/weights/fisher.pt, ewc_lambda=0.001, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=False, Lwf_lambda=[], Lwf_temperature=1.0, PODNet_enable=False, Distillation_layers=[17, 20, 23], POD_lambda=1.0, Old_models=[], DER_enable=False, DER_old_model=[]\n",
      "Command 'git fetch ultralytics' timed out after 5 seconds\n",
      "YOLOv5 🚀 314daedd Python-3.12.3 torch-2.5.1+cu124 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mComet: \u001b[0mrun 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "extractors长度： 0\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83607  models.yolo_final.Detect                [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7089751 parameters, 7089751 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/349 items from runs/train/baseline/weights/best.pt\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/train2007.cache... 16551 im\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 images\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m3.99 anchors/target, 1.000 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/test25/labels.jpg... \n",
      "/root/autodl-tmp/yolo_incremental_learning/train_final.py:407: FutureWarning: `torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead.\n",
      "  scaler = torch.cuda.amp.GradScaler(enabled=amp)\n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/test25\u001b[0m\n",
      "Starting training for 15 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/14      3.53G    0.08608    0.04725    0.07603        108        640:  "
     ]
    }
   ],
   "source": [
    "# Round 2: incremental training on VOC+KITTI, warm-started from the baseline\n",
    "# weights, with an EWC penalty built from the Fisher file saved by the baseline run.\n",
    "# Plain (non-f) string on purpose: there are no placeholders to interpolate, and\n",
    "# literal braces added to the command later would break an f-string at parse time.\n",
    "command = \"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_final.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_VOCKITTI.yaml \\\n",
    "--data data/VOCKITTI.yaml \\\n",
    "--epochs 15 \\\n",
    "--weights ./runs/train/baseline/weights/best.pt \\\n",
    "--ewc_pt runs/train/baseline/weights/fisher.pt \\\n",
    "--ewc_lambda 1e-3 \\\n",
    "--name test2 \\\n",
    "\"\"\"\n",
    "!{command}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6e18fe76-0ba1-4638-810e-6f6375005240",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "4883d8a4-c55f-45b1-b5bb-4b9a60843d32",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/root/autodl-tmp/yolo_incremental_learning/utils/general.py:32: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. Refrain from using this package or pin to Setuptools<81.\n",
      "  import pkg_resources as pkg\n",
      "\u001b[34m\u001b[1mtrain_final: \u001b[0mweights=./runs/train/baseline/weights/best.pt, cfg=models/yolov5s_VOCKITTI.yaml, data=data/VOCKITTI.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=15, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=SI, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=True, SI_pt=./runs/train/baseline/weights/si.pt, SI_lambda=0.1, Lwf_enable=False, Lwf_lambda=[], Lwf_temperature=1.0, PODNet_enable=False, Distillation_layers=[17, 20, 23], POD_lambda=1.0, Old_models=[], DER_enable=False, DER_old_model=[]\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2903 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 1fa09d14 Python-3.12.3 torch-2.5.1+cu124 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mComet: \u001b[0mrun 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "extractors长度： 0\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83607  models.yolo_final.Detect                [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7089751 parameters, 7089751 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/349 items from runs/train/baseline/weights/best.pt\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/train2007.cache... 16551 im\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 images\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m3.99 anchors/target, 1.000 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/SI2/labels.jpg... \n",
      "/root/autodl-tmp/yolo_incremental_learning/train_final.py:407: FutureWarning: `torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead.\n",
      "  scaler = torch.cuda.amp.GradScaler(enabled=amp)\n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/SI2\u001b[0m\n",
      "Starting training for 15 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/14      3.61G    0.06226    0.04069    0.06138         36        640: 1\n",
      "tensor([0.86033], device='cuda:0', grad_fn=<AddBackward0>)  tensor([0.00177], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.367      0.352      0.306      0.159\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/14      5.82G    0.04736    0.03296    0.03183         58        640: 1\n",
      "tensor([1.15460], device='cuda:0', grad_fn=<AddBackward0>)  tensor([0.00237], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.665      0.622      0.667      0.374\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/14      5.82G     0.0456    0.03363    0.02359         37        640: 1\n",
      "tensor([0.71512], device='cuda:0', grad_fn=<AddBackward0>)  tensor([0.00325], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.608      0.548      0.577       0.32\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/14      5.82G    0.04413    0.03498    0.02406         46        640: 1\n",
      "tensor([0.70393], device='cuda:0', grad_fn=<AddBackward0>)  tensor([0.00483], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.614      0.581      0.604      0.338\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/14      5.82G    0.04336    0.03484    0.02248         39        640: 1\n",
      "tensor([0.77781], device='cuda:0', grad_fn=<AddBackward0>)  tensor([0.00667], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.651      0.602      0.638      0.365\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/14      5.82G    0.04153    0.03415     0.0204         28        640: 1\n",
      "tensor([0.64148], device='cuda:0', grad_fn=<AddBackward0>)  tensor([0.00842], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.676      0.638      0.682      0.401\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/14      5.82G    0.04029    0.03369    0.01854         39        640: 1\n",
      "tensor([0.69207], device='cuda:0', grad_fn=<AddBackward0>)  tensor([0.00992], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.675      0.627      0.671      0.405\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/14      5.82G    0.03884    0.03296    0.01751         34        640: 1\n",
      "tensor([0.62795], device='cuda:0', grad_fn=<AddBackward0>)  tensor([0.01119], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.728      0.676      0.735      0.454\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/14      5.82G    0.03781    0.03238     0.0164         41        640: 1\n",
      "tensor([0.57265], device='cuda:0', grad_fn=<AddBackward0>)  tensor([0.01226], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.717       0.69       0.74      0.465\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/14      5.82G    0.03658     0.0313    0.01495         46        640: 1\n",
      "tensor([0.57259], device='cuda:0', grad_fn=<AddBackward0>)  tensor([0.01314], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.732      0.698      0.753      0.483\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/14      5.82G    0.03587    0.03123    0.01387         30        640: 1\n",
      "tensor([0.56209], device='cuda:0', grad_fn=<AddBackward0>)  tensor([0.01386], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.739      0.709      0.769      0.498\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/14      5.82G    0.03465    0.03097    0.01294         26        640: 1\n",
      "tensor([0.42871], device='cuda:0', grad_fn=<AddBackward0>)  tensor([0.01444], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.752       0.73      0.786      0.518\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/14      5.82G    0.03356    0.03027    0.01179         33        640: 1\n",
      "tensor([0.62272], device='cuda:0', grad_fn=<AddBackward0>)  tensor([0.01490], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.754      0.736      0.794       0.53\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/14      5.82G    0.03276    0.02973     0.0111         30        640: 1\n",
      "tensor([0.50600], device='cuda:0', grad_fn=<AddBackward0>)  tensor([0.01527], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.764      0.747      0.804      0.542\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/14      5.82G    0.03195    0.02908    0.01015         33        640: 1\n",
      "tensor([0.54592], device='cuda:0', grad_fn=<AddBackward0>)  tensor([0.01555], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032       0.75      0.759      0.805      0.547\n",
      "\n",
      "15 epochs completed in 0.689 hours.\n",
      "Optimizer stripped from runs/train/SI2/weights/last.pt, 14.5MB\n",
      "Optimizer stripped from runs/train/SI2/weights/best.pt, 14.5MB\n",
      "\n",
      "Validating runs/train/SI2/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032       0.75      0.759      0.805      0.547\n",
      "                   car       4952       1201      0.804      0.892      0.921      0.694\n",
      "                person       4952       4528      0.829      0.821      0.885      0.578\n",
      "             aeroplane       4952        285      0.854       0.78      0.853      0.539\n",
      "               bicycle       4952        337      0.844      0.786      0.874      0.595\n",
      "                  bird       4952        459      0.741      0.706      0.773      0.484\n",
      "                  boat       4952        263      0.648      0.662      0.694      0.399\n",
      "                bottle       4952        469      0.639      0.733      0.754        0.5\n",
      "                   bus       4952        213        0.8      0.828      0.877      0.706\n",
      "                   cat       4952        358      0.843       0.78      0.849      0.605\n",
      "                 chair       4952        756      0.584      0.623      0.639      0.416\n",
      "                   cow       4952        244       0.72      0.828      0.845      0.614\n",
      "           diningtable       4952        206      0.665      0.714      0.728      0.501\n",
      "                   dog       4952        489      0.803      0.722      0.832      0.558\n",
      "                 horse       4952        348      0.859      0.853      0.895      0.623\n",
      "             motorbike       4952        325      0.842      0.785      0.881      0.552\n",
      "           pottedplant       4952        480      0.618      0.573       0.57      0.311\n",
      "                 sheep       4952        242      0.674      0.802      0.821      0.576\n",
      "                  sofa       4952        239      0.672      0.707      0.753      0.559\n",
      "                 train       4952        282      0.804      0.819      0.843      0.557\n",
      "             tvmonitor       4952        308       0.76       0.76      0.811      0.583\n",
      "Results saved to \u001b[1mruns/train/SI2\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "# Round 2 (SI variant): incremental training on VOC+KITTI, warm-started from the\n",
    "# baseline weights, with Synaptic Intelligence regularization loaded from si.pt.\n",
    "# Plain (non-f) string on purpose: there are no placeholders to interpolate, and\n",
    "# literal braces added to the command later would break an f-string at parse time.\n",
    "command = \"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_final.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_VOCKITTI.yaml \\\n",
    "--data data/VOCKITTI.yaml \\\n",
    "--epochs 15 \\\n",
    "--weights ./runs/train/baseline/weights/best.pt \\\n",
    "--SI_enable \\\n",
    "--SI_pt ./runs/train/baseline/weights/si.pt \\\n",
    "--SI_lambda 1e-1 \\\n",
    "--name SI \\\n",
    "\"\"\"\n",
    "!{command}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b6233de1-00dc-4621-a74b-2754afd358a1",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5321464a-0cd5-4a67-b5be-af02d0e96388",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ab0553bb-e46f-4532-a758-2d6051849a4e",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8545e4c8-b008-4b58-a19b-aecfd84067a4",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "1a73a7f8-9038-4575-a64e-3c6bafa26ae6",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/root/autodl-tmp/yolo_incremental_learning/utils/general.py:32: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. Refrain from using this package or pin to Setuptools<81.\n",
      "  import pkg_resources as pkg\n",
      "\u001b[34m\u001b[1mtrain_final: \u001b[0mweights=./runs/train/baseline/weights/best.pt, cfg=models/yolov5s_VOCKITTI.yaml, data=data/VOCKITTIBiC_base.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=15, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=r_base, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=False, Lwf_lambda=[], Lwf_temperature=1.0, PODNet_enable=False, Distillation_layers=[17, 20, 23], POD_lambda=1.0, Old_models=[], DER_enable=False, DER_old_model=[]\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2903 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 1fa09d14 Python-3.12.3 torch-2.5.1+cu124 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mComet: \u001b[0mrun 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "extractors长度： 0\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83607  models.yolo_final.Detect                [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7089751 parameters, 7089751 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/349 items from runs/train/baseline/weights/best.pt\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/kitti_old... 19692 images, \u001b[0m\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /root/autodl-tmp/datasets/VOC/labels/kitti_old.cache\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 images\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.23 anchors/target, 1.000 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/r_base2/labels.jpg... \n",
      "/root/autodl-tmp/yolo_incremental_learning/train_final.py:407: FutureWarning: `torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead.\n",
      "  scaler = torch.cuda.amp.GradScaler(enabled=amp)\n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/r_base2\u001b[0m\n",
      "Starting training for 15 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/14      3.47G    0.06126    0.04255    0.05404         90        640: 1\n",
      "tensor([1.56533], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.369      0.356      0.316      0.168\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/14      5.67G    0.04599    0.03337    0.02868         49        640: 1\n",
      "tensor([1.00962], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.668      0.619      0.654       0.36\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/14      5.67G    0.04449    0.03354    0.02102         65        640: 1\n",
      "tensor([1.12611], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.605      0.573      0.598      0.326\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/14      5.67G    0.04342    0.03454    0.02014         73        640: 1\n",
      "tensor([1.22605], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032       0.67      0.623      0.665      0.385\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/14      5.67G    0.04233    0.03457    0.01861         93        640: 1\n",
      "tensor([1.10687], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.675      0.628      0.675      0.392\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/14      5.67G    0.04087    0.03407    0.01714         75        640: 1\n",
      "tensor([1.15294], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032        0.7      0.663      0.715      0.429\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/14      5.67G    0.03975    0.03351    0.01558         66        640: 1\n",
      "tensor([0.99162], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.709      0.665      0.723       0.44\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/14      5.67G    0.03844    0.03234    0.01453         70        640: 1\n",
      "tensor([0.89366], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.733      0.684      0.744      0.469\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/14      5.67G    0.03734    0.03203    0.01328         60        640: 1\n",
      "tensor([0.83320], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.733      0.697      0.756      0.485\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/14      5.67G    0.03669     0.0319     0.0124         73        640: 1\n",
      "tensor([0.89103], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.745      0.708      0.768      0.498\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/14      5.67G    0.03571    0.03141    0.01154         76        640: 1\n",
      "tensor([0.90947], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.746      0.718      0.777      0.508\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/14      5.67G    0.03475    0.03051    0.01073         63        640: 1\n",
      "tensor([0.86694], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.752      0.738      0.786       0.52\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/14      5.67G    0.03383    0.03031   0.009959         71        640: 1\n",
      "tensor([0.77350], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.766      0.748      0.803      0.535\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/14      5.67G    0.03297    0.02951   0.009402         94        640: 1\n",
      "tensor([0.76111], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.776      0.757      0.812      0.552\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/14      5.67G    0.03213    0.02933   0.008594        102        640: 1\n",
      "tensor([0.90564], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.769       0.77      0.817       0.56\n",
      "\n",
      "15 epochs completed in 0.568 hours.\n",
      "Optimizer stripped from runs/train/r_base2/weights/last.pt, 14.5MB\n",
      "Optimizer stripped from runs/train/r_base2/weights/best.pt, 14.5MB\n",
      "\n",
      "Validating runs/train/r_base2/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.769      0.768      0.817       0.56\n",
      "                   car       4952       1201      0.818      0.899      0.921       0.69\n",
      "                person       4952       4528      0.838       0.81      0.882      0.578\n",
      "             aeroplane       4952        285      0.911      0.846      0.899      0.581\n",
      "               bicycle       4952        337      0.868      0.822      0.901      0.629\n",
      "                  bird       4952        459      0.749      0.749      0.778      0.495\n",
      "                  boat       4952        263      0.615      0.681      0.685      0.398\n",
      "                bottle       4952        469      0.654      0.722       0.75       0.49\n",
      "                   bus       4952        213      0.819      0.831      0.889       0.72\n",
      "                   cat       4952        358      0.833      0.778      0.849      0.608\n",
      "                 chair       4952        756      0.629      0.639      0.678      0.435\n",
      "                   cow       4952        244      0.735      0.861       0.88       0.64\n",
      "           diningtable       4952        206      0.709      0.708      0.748      0.519\n",
      "                   dog       4952        489      0.811      0.703      0.828      0.568\n",
      "                 horse       4952        348      0.848      0.842      0.897      0.622\n",
      "             motorbike       4952        325      0.835      0.806      0.881      0.569\n",
      "           pottedplant       4952        480       0.67      0.529      0.571      0.299\n",
      "                 sheep       4952        242      0.726      0.839      0.855       0.62\n",
      "                  sofa       4952        239      0.681      0.674      0.746      0.557\n",
      "                 train       4952        282       0.88      0.809      0.869      0.588\n",
      "             tvmonitor       4952        308      0.752      0.815      0.833        0.6\n",
      "Results saved to \u001b[1mruns/train/r_base2\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "# Retrain the base model on the old-class split (VOCKITTIBiC_base) for 15 epochs,\n",
    "# warm-starting from the baseline checkpoint. Plain triple-quoted string: the\n",
    "# command has no placeholders, so the f-prefix was unnecessary (flake8 F541).\n",
    "command = \"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_final.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_VOCKITTI.yaml \\\n",
    "--data data/VOCKITTIBiC_base.yaml \\\n",
    "--epochs 15 \\\n",
    "--weights ./runs/train/baseline/weights/best.pt \\\n",
    "--name r_base\n",
    "\"\"\"\n",
    "# IPython substitutes {command} before handing the line to the shell.\n",
    "!{command}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "07da156a-8e1f-4e03-9c8b-c080b0fedd3c",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a8bc1428-00c9-4344-b19e-58ea88b6a7f2",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "cb4b077b-61a0-4531-a6f9-e6b792603666",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/root/autodl-tmp/yolo_incremental_learning/utils/general.py:32: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. Refrain from using this package or pin to Setuptools<81.\n",
      "  import pkg_resources as pkg\n",
      "\u001b[34m\u001b[1mtrain_final: \u001b[0mweights=./runs/train/baseline/weights/best.pt, cfg=models/yolov5s_VOCKITTI.yaml, data=data/VOCKITTIBiC_base.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=15, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=DER, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=False, Lwf_lambda=[], Lwf_temperature=1.0, PODNet_enable=False, Distillation_layers=[17, 20, 23], POD_lambda=1.0, Old_models=[], DER_enable=True, DER_old_model=['./runs/train/baseline/weights/last.pt']\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2903 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 1fa09d14 Python-3.12.3 torch-2.5.1+cu124 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mComet: \u001b[0mrun 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo_final.Detect                [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "Model summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "extractors长度： 1\n",
      "首次创建 extractors\n",
      "成功拼接 extractors\n",
      "extractors共有模型个数： 2\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1    166935  models.yolo_final.Detect                [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [256, 512, 1024]]\n",
      "已知类别： 8\n",
      "YOLOv5s_VOCKITTI summary: 432 layers, 14279012 parameters, 7237807 gradients, 81.8 GFLOPs\n",
      "\n",
      "Transferred 342/1054 items from runs/train/baseline/weights/best.pt\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 114 weight(decay=0.0), 123 weight(decay=0.0005), 123 bias\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/kitti_old.cache... 19692 im\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 images\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.23 anchors/target, 1.000 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/DER2/labels.jpg... \n",
      "/root/autodl-tmp/yolo_incremental_learning/train_final.py:407: FutureWarning: `torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead.\n",
      "  scaler = torch.cuda.amp.GradScaler(enabled=amp)\n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/DER2\u001b[0m\n",
      "Starting training for 15 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/14      6.45G    0.06208     0.0411    0.05358         90        640: 1\n",
      "tensor([3.14453], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.274      0.296      0.268      0.142\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/14      8.66G    0.04761    0.03312    0.02978         49        640: 1\n",
      "tensor([2.14699], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.543      0.588      0.556      0.281\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/14      8.66G    0.04573    0.03257    0.02009         65        640: 1\n",
      "tensor([2.30529], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032       0.62      0.606      0.644      0.351\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/14      8.66G    0.04331     0.0326    0.01762         73        640: 1\n",
      "tensor([2.49858], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.674      0.646      0.693      0.401\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/14      8.66G    0.04147    0.03255    0.01589         93        640: 1\n",
      "tensor([2.03275], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.719      0.666      0.728      0.442\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/14      8.66G    0.03996     0.0322    0.01468         75        640: 1\n",
      "tensor([2.01918], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.714      0.701      0.749      0.463\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/14      8.66G    0.03882    0.03186    0.01344         66        640: 1\n",
      "tensor([1.94219], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.733      0.706      0.762      0.478\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/14      8.66G    0.03762     0.0308    0.01262         70        640: 1\n",
      "tensor([1.67476], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.756      0.698      0.773      0.492\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/14      8.66G    0.03652    0.03058    0.01155         60        640: 1\n",
      "tensor([1.59997], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.737      0.708      0.769      0.497\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/14      8.66G    0.03591    0.03058    0.01091         73        640: 1\n",
      "tensor([1.76843], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.754      0.729      0.794      0.525\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/14      8.66G     0.0349    0.03019     0.0103         76        640: 1\n",
      "tensor([1.77515], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.757      0.735      0.798      0.526\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/14      8.66G    0.03388    0.02935    0.00948         63        640: 1\n",
      "tensor([1.69271], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032       0.76      0.741      0.799      0.538\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/14      8.66G    0.03296    0.02919   0.008787         71        640: 1\n",
      "tensor([1.46914], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032       0.78      0.747      0.815      0.553\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/14      8.66G    0.03208    0.02848   0.008393         94        640: 1\n",
      "tensor([1.49949], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.789      0.761      0.824      0.566\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/14      8.66G     0.0313    0.02842   0.007673        102        640: 1\n",
      "tensor([1.74481], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.777      0.772      0.827      0.574\n",
      "\n",
      "15 epochs completed in 0.992 hours.\n",
      "Optimizer stripped from runs/train/DER2/weights/last.pt, 29.1MB\n",
      "Optimizer stripped from runs/train/DER2/weights/best.pt, 29.1MB\n",
      "\n",
      "Validating runs/train/DER2/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 375 layers, 14269508 parameters, 0 gradients, 81.1 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.777      0.773      0.828      0.574\n",
      "                   car       4952       1201       0.81      0.893      0.922        0.7\n",
      "                person       4952       4528      0.837      0.821      0.889      0.593\n",
      "             aeroplane       4952        285      0.877      0.828       0.89      0.589\n",
      "               bicycle       4952        337      0.899      0.794      0.903      0.631\n",
      "                  bird       4952        459       0.76      0.752       0.79      0.517\n",
      "                  boat       4952        263      0.625      0.679      0.726      0.425\n",
      "                bottle       4952        469      0.703      0.727      0.779      0.529\n",
      "                   bus       4952        213      0.805      0.853        0.9      0.745\n",
      "                   cat       4952        358      0.883      0.776       0.88      0.637\n",
      "                 chair       4952        756       0.63      0.642      0.681       0.44\n",
      "                   cow       4952        244      0.746      0.879      0.876      0.638\n",
      "           diningtable       4952        206      0.779      0.683      0.774      0.543\n",
      "                   dog       4952        489      0.808      0.746      0.834      0.582\n",
      "                 horse       4952        348      0.866      0.885      0.913      0.649\n",
      "             motorbike       4952        325       0.85      0.803      0.887      0.571\n",
      "           pottedplant       4952        480      0.655      0.533      0.574      0.305\n",
      "                 sheep       4952        242      0.722      0.822      0.845      0.617\n",
      "                  sofa       4952        239      0.703      0.682      0.758      0.578\n",
      "                 train       4952        282      0.849      0.798      0.869      0.578\n",
      "             tvmonitor       4952        308      0.741      0.856      0.859      0.617\n",
      "Results saved to \u001b[1mruns/train/DER2\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "# DER incremental run: same base config as the r_base cell, but with Dark\n",
    "# Experience Replay enabled against the frozen baseline checkpoint.\n",
    "# Plain string (no placeholders → no f-prefix, flake8 F541); the old-model\n",
    "# path is kept on the same line as its flag so the argument pairing is obvious.\n",
    "command = \"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_final.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_VOCKITTI.yaml \\\n",
    "--data data/VOCKITTIBiC_base.yaml \\\n",
    "--epochs 15 \\\n",
    "--weights ./runs/train/baseline/weights/best.pt \\\n",
    "--DER_enable \\\n",
    "--DER_old_model ./runs/train/baseline/weights/last.pt \\\n",
    "--name DER\n",
    "\"\"\"\n",
    "# IPython substitutes {command} before handing the line to the shell.\n",
    "!{command}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bc21f1e6-4d1f-43e6-8722-baba920b0ce7",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "33b2adc6-a522-41cc-a9e0-2e5e9fc81216",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d84e7e2c-3ffe-40df-a50c-43410f7f8f32",
   "metadata": {},
   "outputs": [],
   "source": [
    "# The wrong dataset was used above; the run must be redone with VOCKITTIBiC_base"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "20bce195-c2de-48e4-9cd6-a478515d81dd",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/root/autodl-tmp/yolo_incremental_learning/utils/general.py:32: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. Refrain from using this package or pin to Setuptools<81.\n",
      "  import pkg_resources as pkg\n",
      "\u001b[34m\u001b[1mtrain_final: \u001b[0mweights=./runs/train/baseline/weights/best.pt, cfg=models/yolov5s_VOCKITTI.yaml, data=data/VOCKITTIBiC_base.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=15, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=POD, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=False, Lwf_lambda=[], Lwf_temperature=1.0, PODNet_enable=True, Distillation_layers=[1, 3, 5, 7, 9, 13, 17, 20, 23], POD_lambda=100.0, Old_models=['./runs/train/baseline/weights/last.pt'], DER_enable=False, DER_old_model=[]\n",
      "Command 'git fetch ultralytics' timed out after 5 seconds\n",
      "YOLOv5 🚀 1fa09d14 Python-3.12.3 torch-2.5.1+cu124 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mComet: \u001b[0mrun 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "extractors长度： 0\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83607  models.yolo_final.Detect                [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7089751 parameters, 7089751 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/349 items from runs/train/baseline/weights/best.pt\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "/root/autodl-tmp/yolo_incremental_learning/models/common.py:894: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n",
      "  with amp.autocast(autocast):\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/kitti_old.cache... 19692 im\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 images\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.23 anchors/target, 1.000 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/POD2/labels.jpg... \n",
      "/root/autodl-tmp/yolo_incremental_learning/train_final.py:407: FutureWarning: `torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead.\n",
      "  scaler = torch.cuda.amp.GradScaler(enabled=amp)\n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/POD2\u001b[0m\n",
      "Starting training for 15 epochs...\n",
      "Overriding model.yaml nc=26 with nc=8\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo_final.Detect                [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/14      4.16G    0.09839     0.0482    0.07737        103        640:  error: RPC failed; curl 16 Error in the HTTP2 framing layer\n",
      "fatal: expected flush after ref listing\n",
      "       0/14      4.16G    0.06219    0.04342    0.05477         90        640: 1\n",
      "tensor([1.70960], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00111, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.471      0.281      0.241      0.127\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/14      6.36G    0.04575    0.03431     0.0311         49        640: 1\n",
      "tensor([1.24772], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00154, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.606      0.595      0.619      0.343\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/14      6.36G    0.04414     0.0337    0.02231         65        640: 1\n",
      "tensor([1.41281], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00233, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.629      0.589      0.629      0.345\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/14      6.36G    0.04268    0.03394     0.0199         73        640: 1\n",
      "tensor([1.50215], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00245, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.651      0.623      0.662      0.376\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/14      6.36G    0.04151    0.03389    0.01829         93        640: 1\n",
      "tensor([1.24888], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00223, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.672      0.638      0.683      0.405\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/14      6.36G    0.04018    0.03344     0.0169         75        640: 1\n",
      "tensor([1.30647], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00210, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.709      0.658      0.716      0.434\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/14      6.36G    0.03917    0.03308    0.01564         66        640: 1\n",
      "tensor([1.14435], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00211, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032       0.68      0.672      0.711      0.432\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/14      6.36G     0.0381    0.03194     0.0146         70        640: 1\n",
      "tensor([1.08221], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00198, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.726      0.675      0.737      0.462\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/14      6.36G    0.03697    0.03173    0.01349         60        640: 1\n",
      "tensor([1.04007], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00195, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.728      0.677      0.739      0.468\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/14      6.36G     0.0364    0.03176     0.0128         73        640: 1\n",
      "tensor([1.09032], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00207, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.736      0.717      0.763      0.487\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/14      6.36G    0.03551    0.03127    0.01216         76        640: 1\n",
      "tensor([1.11348], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00170, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.735      0.707      0.762      0.492\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/14      6.36G    0.03458    0.03046    0.01134         63        640: 1\n",
      "tensor([1.07193], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00189, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.748      0.714      0.772      0.507\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/14      6.36G    0.03373    0.03026    0.01054         71        640: 1\n",
      "tensor([0.90611], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00163, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.754      0.718       0.78      0.517\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/14      6.36G     0.0329    0.02961     0.0101         94        640: 1\n",
      "tensor([0.91912], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00154, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.769      0.729      0.788      0.526\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/14      6.36G    0.03219    0.02951   0.009342        102        640: 1\n",
      "tensor([1.06325], device='cuda:0', grad_fn=<AddBackward0>) tensor(0.00152, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.763      0.735      0.791      0.534\n",
      "\n",
      "15 epochs completed in 0.684 hours.\n",
      "Optimizer stripped from runs/train/POD2/weights/last.pt, 14.5MB\n",
      "Optimizer stripped from runs/train/POD2/weights/best.pt, 14.5MB\n",
      "\n",
      "Validating runs/train/POD2/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.761      0.735      0.791      0.534\n",
      "                   car       4952       1201      0.777      0.885      0.909      0.676\n",
      "                person       4952       4528      0.839      0.807      0.876       0.57\n",
      "             aeroplane       4952        285      0.872      0.804       0.86      0.547\n",
      "               bicycle       4952        337       0.89      0.786      0.881      0.599\n",
      "                  bird       4952        459        0.7      0.712      0.737      0.467\n",
      "                  boat       4952        263      0.633      0.612      0.668       0.38\n",
      "                bottle       4952        469      0.718      0.705      0.742      0.499\n",
      "                   bus       4952        213      0.804       0.79      0.872       0.69\n",
      "                   cat       4952        358       0.85      0.695      0.822      0.569\n",
      "                 chair       4952        756      0.595      0.603      0.631      0.404\n",
      "                   cow       4952        244      0.712       0.84      0.833      0.604\n",
      "           diningtable       4952        206      0.723      0.658      0.735      0.482\n",
      "                   dog       4952        489       0.78      0.689      0.788      0.522\n",
      "                 horse       4952        348      0.886      0.845      0.891      0.611\n",
      "             motorbike       4952        325      0.851      0.794      0.869      0.552\n",
      "           pottedplant       4952        480      0.633      0.483      0.533      0.288\n",
      "                 sheep       4952        242      0.718      0.814      0.827      0.591\n",
      "                  sofa       4952        239      0.664      0.645      0.704      0.508\n",
      "                 train       4952        282      0.872      0.759      0.839      0.546\n",
      "             tvmonitor       4952        308       0.71      0.778      0.802       0.57\n",
      "Results saved to \u001b[1mruns/train/POD2\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "# PODNet distillation run (--PODNet_enable): distill intermediate features\n",
    "# from the frozen baseline model at the layer indices listed below.\n",
    "pod_layers = '1 3 5 7 9 13 17 20 23'\n",
    "# Encode the distillation-layer set in the run name so runs with different\n",
    "# layer choices land in distinct runs/train/ directories. (Previously\n",
    "# pod_name was computed but never used, and every run was named 'POD',\n",
    "# forcing the POD/POD2 auto-increment seen in the output above.)\n",
    "pod_name = pod_layers.replace(' ', '_')\n",
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_final.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_VOCKITTI.yaml \\\n",
    "--data data/VOCKITTIBiC_base.yaml \\\n",
    "--epochs 15 \\\n",
    "--weights ./runs/train/baseline/weights/best.pt \\\n",
    "--PODNet_enable \\\n",
    "--Distillation_layers {pod_layers} \\\n",
    "--POD_lambda 1e2 \\\n",
    "--Old_models \\\n",
    "   ./runs/train/baseline/weights/last.pt \\\n",
    "--name POD_{pod_name} \\\n",
    "\"\"\"\n",
    "# Execute the assembled shell command in the notebook's environment.\n",
    "!{command}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "86c39e89-089d-438e-a8f5-2d1835c30542",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "dd00c2f8-f92f-4e56-9361-adb3db3805cd",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "acff7727-8838-4025-9530-26bcf87a96bc",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
