{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "30cc87d5-824a-40c0-b66c-623f25540926",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Setup complete ✅ (112 CPUs, 755.3 GB RAM, 25.8/30.0 GB disk)\n"
     ]
    }
   ],
   "source": [
    "# Setup cell: experiment tracking + environment checks for YOLOv5 training.\n",
    "import comet_ml\n",
    "import torch\n",
    "import utils\n",
    "\n",
    "# Start Comet.ml logging under the 'exp_100epoch' project.\n",
    "comet_ml.init(project_name='exp_100epoch')\n",
    "# This project should contain the 100-epoch runs with fog levels 0, 0.6, 1.2,\n",
    "# plus the incremental runs in 100-epoch units.\n",
    "display = utils.notebook_init()  # checks: prints Python/torch/CUDA and host resource summary"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "d62f49c2-e213-46ee-b286-7161e549f8a4",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_Lwf: \u001b[0mweights=yolov5s.pt, cfg=models/yolov5s_VisVOCKITTI.yaml, data=data/VisDrone_incremental.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=50, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=VisDrone_base, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=False, Lwf_lambda=0.0001, Lwf_temperature=1.0\n",
      "Command 'git fetch ultralytics' timed out after 5 seconds\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/exp-100epoch/9fc69a8ebf23471e867be13a5073afca\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     91704  models.yolo.Detect                      [29, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VisVOCKITTI summary: 217 layers, 7097848 parameters, 7097848 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/355 items from yolov5s.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 66 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/labels\u001b[0m\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/0000137_02220_d_0000163.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/0000140_00118_d_0000002.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/9999945_00000_d_0000114.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/9999987_00000_d_0000049.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-val/labels.cac\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m2.95 anchors/target, 0.933 Best Possible Recall (BPR). Anchors are a poor fit to dataset ⚠️, attempting to improve...\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mWARNING ⚠️ Extremely small objects found: 29644 of 343201 labels are <3 pixels in size\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mRunning kmeans for 9 anchors on 342304 points...\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mEvolving anchors with Genetic Algorithm: fitness = 0.7493: 100%|████\u001b[0m\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mthr=0.25: 0.9995 best possible recall, 5.74 anchors past thr\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mn=9, img_size=640, metric_all=0.364/0.748-mean/best, past_thr=0.485-mean: 3,5, 4,9, 8,7, 8,15, 16,9, 16,21, 33,17, 29,37, 61,63\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mDone ✅ (optional: update model *.yaml to use these anchors in the future)\n",
      "Plotting labels to runs/train/VisDrone_base/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/VisDrone_base\u001b[0m\n",
      "Starting training for 50 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/49      3.64G      0.128     0.1338    0.05868        431        640: 1\n",
      "tensor([2.05019], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.277      0.149     0.0827     0.0342\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/49      3.84G     0.1099      0.169    0.03976        589        640: 1\n",
      "tensor([2.39039], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.198      0.199     0.0998     0.0422\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/49      3.84G     0.1081     0.1716    0.03644        586        640: 1\n",
      "tensor([2.40948], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.468      0.186      0.144     0.0609\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/49      3.84G     0.1057     0.1694    0.03406        785        640: 1\n",
      "tensor([2.29534], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.427       0.19      0.167     0.0773\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/49      3.84G     0.1034     0.1705    0.03261        417        640: 1\n",
      "tensor([2.05576], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.545      0.201      0.185     0.0882\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/49      3.84G      0.102     0.1693    0.03165        276        640: 1\n",
      "tensor([1.61193], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.444      0.224      0.203     0.0953\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/49      3.84G     0.1014     0.1679    0.03082        436        640: 1\n",
      "tensor([2.01963], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.324      0.242      0.216      0.107\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/49      3.84G     0.1008     0.1679    0.03024        521        640: 1\n",
      "tensor([2.03945], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.299      0.237      0.222      0.109\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/49      3.84G    0.09974     0.1675     0.0297        326        640: 1\n",
      "tensor([1.69504], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.329      0.257      0.233      0.116\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/49      3.84G    0.09936     0.1665    0.02933        498        640: 1\n",
      "tensor([1.97692], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.32      0.274      0.238      0.118\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/49      3.84G    0.09945     0.1657    0.02892        502        640: 1\n",
      "tensor([1.91425], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.336      0.269      0.241      0.119\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/49      3.84G    0.09854     0.1663     0.0285        568        640: 1\n",
      "tensor([2.15358], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.343      0.285       0.25      0.125\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/49      3.84G    0.09844     0.1663    0.02812        572        640: 1\n",
      "tensor([2.18566], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.354      0.286      0.255      0.131\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/49      3.84G    0.09787     0.1639     0.0279        560        640: 1\n",
      "tensor([2.19205], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.359      0.288       0.26      0.132\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/49      3.84G    0.09772     0.1659    0.02765        466        640: 1\n",
      "tensor([1.95648], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.363      0.291      0.263      0.135\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/49      3.84G    0.09744     0.1641     0.0275        709        640: 1\n",
      "tensor([2.32710], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.37      0.296      0.266      0.137\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/49      3.84G    0.09759     0.1651    0.02727        440        640: 1\n",
      "tensor([1.82875], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.37      0.294      0.267      0.137\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/49      3.84G    0.09746     0.1648    0.02707        580        640: 1\n",
      "tensor([1.92519], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.384      0.297       0.27       0.14\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/49      3.84G    0.09676     0.1638    0.02695        503        640: 1\n",
      "tensor([1.95182], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.378      0.293      0.269      0.139\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/49      3.84G    0.09678     0.1642    0.02667        426        640: 1\n",
      "tensor([1.71241], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.38      0.301       0.28      0.144\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/49      3.84G    0.09654     0.1612    0.02642        705        640: 1\n",
      "tensor([1.95479], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.382      0.304      0.283      0.146\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/49      3.84G    0.09621      0.161    0.02635        907        640: 1\n",
      "tensor([2.53345], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.395      0.307      0.283      0.146\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/49      3.84G    0.09576     0.1613    0.02616        591        640: 1\n",
      "tensor([2.14569], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.39      0.308      0.289      0.152\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/49      3.84G    0.09587     0.1596    0.02604        567        640: 1\n",
      "tensor([1.93450], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.396      0.305      0.287      0.149\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/49      3.84G    0.09563     0.1597    0.02584        519        640: 1\n",
      "tensor([1.90109], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.388      0.312      0.292      0.154\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/49      3.84G    0.09526     0.1598    0.02572        751        640: 1\n",
      "tensor([2.14226], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.401      0.307      0.288      0.152\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/49      3.84G    0.09525     0.1596    0.02562        335        640: 1\n",
      "tensor([1.61556], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.411      0.305      0.295      0.154\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/49      3.84G    0.09493     0.1592    0.02559        754        640: 1\n",
      "tensor([2.47649], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.401      0.312      0.296      0.157\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/49      3.84G    0.09529     0.1586    0.02546        637        640: 1\n",
      "tensor([1.95017], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.407      0.308      0.295      0.156\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/49      3.84G    0.09554     0.1598    0.02534       1044        640: 1\n",
      "tensor([2.20750], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.421      0.307      0.299      0.158\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/49      3.84G    0.09483     0.1589    0.02525        288        640: 1\n",
      "tensor([1.48018], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.412      0.309      0.295      0.156\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/49      3.84G    0.09455     0.1582     0.0252        628        640: 1\n",
      "tensor([2.06083], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.399      0.315      0.298      0.159\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/49      3.84G    0.09506     0.1591    0.02495        580        640: 1\n",
      "tensor([2.06883], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.411      0.318      0.301       0.16\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/49      3.84G    0.09464     0.1589    0.02491        746        640: 1\n",
      "tensor([2.13884], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.411      0.323      0.303      0.161\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/49      3.84G    0.09415     0.1579     0.0249        455        640: 1\n",
      "tensor([1.79029], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.401      0.319      0.299       0.16\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/49      3.84G     0.0945     0.1575    0.02474        505        640: 1\n",
      "tensor([1.86406], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.417      0.321      0.308      0.165\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/49      3.84G     0.0943     0.1574    0.02465        434        640: 1\n",
      "tensor([1.81470], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.412      0.324      0.305      0.163\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/49      3.84G    0.09435     0.1577    0.02455        590        640: 1\n",
      "tensor([2.12986], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.414      0.326      0.309      0.164\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/49      3.84G    0.09364     0.1553    0.02453        686        640: 1\n",
      "tensor([2.09860], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.411      0.327      0.309      0.165\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/49      3.84G    0.09386     0.1554    0.02459        590        640: 1\n",
      "tensor([1.99239], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.419      0.324      0.309      0.165\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/49      3.84G    0.09358     0.1548    0.02422        650        640: 1\n",
      "tensor([2.04964], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.422      0.325      0.313      0.168\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/49      3.84G    0.09367     0.1538    0.02406        679        640: 1\n",
      "tensor([2.00177], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.42      0.329      0.311      0.167\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/49      3.84G    0.09356     0.1558    0.02412        508        640: 1\n",
      "tensor([1.81949], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.415       0.33      0.312      0.168\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/49      3.84G    0.09333     0.1556    0.02405        765        640: 1\n",
      "tensor([1.99127], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.416       0.33      0.315      0.169\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/49      3.84G    0.09344     0.1552    0.02399        518        640: 1\n",
      "tensor([1.98045], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.416      0.328      0.312      0.168\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/49      3.84G    0.09317     0.1538    0.02383        589        640: 1\n",
      "tensor([1.94522], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.422      0.328      0.314      0.169\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/49      3.84G    0.09319     0.1542     0.0237        615        640: 1\n",
      "tensor([2.06610], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.425      0.329      0.316       0.17\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/49      3.84G     0.0933     0.1536    0.02376        603        640: 1\n",
      "tensor([2.01973], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.42      0.329      0.316       0.17\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/49      3.84G     0.0936     0.1532    0.02379        606        640: 1\n",
      "tensor([1.94997], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.419      0.329      0.315      0.171\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/49      3.84G    0.09296      0.154    0.02358        549        640: 1\n",
      "tensor([1.92254], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.412      0.333      0.318      0.172\n",
      "\n",
      "50 epochs completed in 0.640 hours.\n",
      "Optimizer stripped from runs/train/VisDrone_base/weights/last.pt, 14.5MB\n",
      "Optimizer stripped from runs/train/VisDrone_base/weights/best.pt, 14.5MB\n",
      "\n",
      "Validating runs/train/VisDrone_base/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_VisVOCKITTI summary: 160 layers, 7088344 parameters, 0 gradients, 16.0 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.41      0.311        0.3      0.164\n",
      "                   car        548      14064      0.593      0.704      0.706      0.465\n",
      "                   van        548       1975       0.42       0.34      0.334      0.224\n",
      "                 truck        548        750      0.435      0.281      0.282      0.164\n",
      "                person        548       5125      0.428      0.278      0.261     0.0905\n",
      "               bicycle        548       1287      0.222      0.146      0.103     0.0355\n",
      "                   bus        548        251      0.445       0.39       0.37      0.236\n",
      "             motorbike        548       4886      0.437      0.353      0.317      0.123\n",
      "            pedestrian        548       8844      0.438      0.375      0.371      0.153\n",
      "              tricycle        548       1045      0.427      0.147      0.158     0.0838\n",
      "       awning-tricycle        548        532      0.251     0.0959     0.0965     0.0613\n",
      "Results saved to \u001b[1mruns/train/VisDrone_base\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : VisDrone_base\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/exp-100epoch/9fc69a8ebf23471e867be13a5073afca\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_f1              : 0.1386836468439828\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_false_positives : 152.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_mAP@.5          : 0.09652244704643688\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_mAP@.5:.95      : 0.06133167982913308\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_precision       : 0.2506304333890541\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_recall          : 0.09586466165413533\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_support         : 532\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_true_positives  : 51.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_f1                      : 0.17614648906614133\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_false_positives         : 660.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5                  : 0.10268929503778595\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5:.95              : 0.03546933298185776\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_precision               : 0.22180614747795993\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_recall                  : 0.14607614607614608\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_support                 : 1287\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_true_positives          : 188.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_f1                          : 0.4160254377263856\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_false_positives             : 122.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5                      : 0.36952674368462235\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5:.95                  : 0.2362276612639263\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_precision                   : 0.44520151391809143\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_recall                      : 0.3904382470119522\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_support                     : 251\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_true_positives              : 98.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_f1                          : 0.6435815625682618\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_false_positives             : 6810.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5                      : 0.7058119679195315\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5:.95                  : 0.46529791193246595\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_precision                   : 0.5925655844331251\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_recall                      : 0.7042093287827076\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_support                     : 14064\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_true_positives              : 9904.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [2005]                     : (3.3714311122894287, 6.21133279800415)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [100]           : (0.08265186665208843, 0.31779847304139)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [100]      : (0.0341795172830672, 0.17163067390746278)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [100]         : (0.1975767715064895, 0.5454348977072963)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [100]            : (0.1485568814007791, 0.3328737628239343)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_f1                    : 0.39082293809897867\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_false_positives       : 2225.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_mAP@.5                : 0.31654910766108674\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_mAP@.5:.95            : 0.12338889699765247\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_precision             : 0.437020277078257\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_recall                : 0.3534588620548506\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_support               : 4886\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_true_positives        : 1727.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_f1                   : 0.4041976667904952\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_false_positives      : 4252.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_mAP@.5               : 0.3709686236158385\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_mAP@.5:.95           : 0.15302048957961797\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_precision            : 0.43824870103148694\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_recall               : 0.3750565355042967\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_support              : 8844\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_true_positives       : 3317.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_f1                       : 0.3369711663277654\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_false_positives          : 1903.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5                   : 0.2605346241322779\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5:.95               : 0.09052794815752685\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_precision                : 0.42804388331075444\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_recall                   : 0.2778536585365854\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_support                  : 5125\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_true_positives           : 1424.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [100]            : (0.09296489506959915, 0.1279529631137848)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [100]            : (0.023577185347676277, 0.05867963656783104)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [100]            : (0.1338007152080536, 0.1716068983078003)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_f1                     : 0.2190797727055038\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_false_positives        : 207.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_mAP@.5                 : 0.15753505665816261\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_mAP@.5:.95             : 0.0837804393452355\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_precision              : 0.42673395155697397\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_recall                 : 0.14736842105263157\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_support                : 1045\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_true_positives         : 154.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_f1                        : 0.34154036402040017\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_false_positives           : 274.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5                    : 0.28204012259884303\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5:.95                : 0.1642851612350567\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_precision                 : 0.43491250473443543\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_recall                    : 0.28117450784117454\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_support                   : 750\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_true_positives            : 211.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [100]              : (0.09155362844467163, 0.10631834715604782)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [100]              : (0.030062982812523842, 0.044229283928871155)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [100]              : (0.2271745502948761, 0.2360975444316864)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_f1                          : 0.37565984708689976\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_false_positives             : 926.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5                      : 0.33439132853137155\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5:.95                  : 0.22395163849844776\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_precision                   : 0.4200626600912885\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_recall                      : 0.33974683544303796\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_support                     : 1975\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_true_positives              : 671.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [100]                     : (0.0004960000000000005, 0.07007407407407407)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [100]                     : (0.0004960000000000005, 0.00959609547325103)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [100]                     : (0.0004960000000000005, 0.00959609547325103)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : VisDrone_base\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/exp-100epoch/9fc69a8ebf23471e867be13a5073afca\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_enable          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_lambda          : 0.0001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_temperature     : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.18125\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/VisDrone_base\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (2.21 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     model graph                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Please wait for assets to finish uploading (timeout is 10800 seconds)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m All assets have been sent, waiting for delivery confirmation\n"
     ]
    }
   ],
   "source": [
    "# Train the YOLOv5 baseline on VisDrone for 50 epochs, exporting per-class\n",
    "# metrics to Comet via COMET_LOG_PER_CLASS_METRICS.\n",
    "# NOTE: plain triple-quoted string — the original `f\"\"\"` had no placeholders (flake8 F541).\n",
    "command = \"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_Lwf.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_VisVOCKITTI.yaml \\\n",
    "--data data/VisDrone_incremental.yaml \\\n",
    "--epochs 50 \\\n",
    "--weights yolov5s.pt \\\n",
    "--name VisDrone_base\n",
    "\"\"\"\n",
    "!{command}\n",
    "# started at 18:58"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a0a77dfa-692b-4885-bebc-5c1e10e337b0",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "71763f2b-8bed-4b83-9c94-8b0fc132e387",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Retrain (re-run training)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "7dd72242-907f-4605-bdb8-f1f25a647028",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_Lwf: \u001b[0mweights=./runs/train/increment_VOC_plain/weights/last.pt, cfg=models/yolov5s_VisVOCKITTI.yaml, data=data/VisDrone_incremental.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=100, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=Vis_Voc_Kitti_base, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=False, Lwf_lambda=0.0001, Lwf_temperature=1.0\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2895 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/exp-100epoch/adc566df0ff34c1a9ad43bac2684f316\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     91704  models.yolo.Detect                      [29, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VisVOCKITTI summary: 217 layers, 7097848 parameters, 7097848 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/355 items from runs/train/increment_VOC_plain/weights/last.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 66 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/labels\u001b[0m\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/0000137_02220_d_0000163.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/0000140_00118_d_0000002.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/9999945_00000_d_0000114.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/9999987_00000_d_0000049.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-val/labels.cac\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m2.95 anchors/target, 0.933 Best Possible Recall (BPR). Anchors are a poor fit to dataset ⚠️, attempting to improve...\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mWARNING ⚠️ Extremely small objects found: 29644 of 343201 labels are <3 pixels in size\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mRunning kmeans for 9 anchors on 342304 points...\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mEvolving anchors with Genetic Algorithm: fitness = 0.7493: 100%|████\u001b[0m\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mthr=0.25: 0.9995 best possible recall, 5.74 anchors past thr\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mn=9, img_size=640, metric_all=0.364/0.748-mean/best, past_thr=0.485-mean: 3,5, 4,9, 8,7, 8,15, 16,9, 16,21, 33,17, 29,37, 61,63\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mDone ✅ (optional: update model *.yaml to use these anchors in the future)\n",
      "Plotting labels to runs/train/Vis_Voc_Kitti_base/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/Vis_Voc_Kitti_base\u001b[0m\n",
      "Starting training for 100 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/99      3.64G     0.1343     0.1344    0.06474        431        640: 1\n",
      "tensor([2.14821], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.136      0.107     0.0439     0.0176\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/99      3.84G      0.115     0.1657    0.04486        589        640: 1\n",
      "tensor([2.43725], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.272      0.152     0.0817     0.0333\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/99      3.84G     0.1103     0.1724    0.03936        586        640: 1\n",
      "tensor([2.45012], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.289      0.184      0.103     0.0441\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/99      3.84G     0.1065     0.1711    0.03675        785        640: 1\n",
      "tensor([2.30915], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.47      0.193      0.139      0.062\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/99      3.84G     0.1042     0.1713    0.03462        417        640: 1\n",
      "tensor([2.07771], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.393      0.202      0.162     0.0758\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/99      3.84G     0.1027     0.1697    0.03319        276        640: 1\n",
      "tensor([1.65170], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.311      0.214      0.173     0.0806\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/99      3.84G     0.1021     0.1686    0.03223        436        640: 1\n",
      "tensor([2.02402], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.533      0.212      0.188      0.091\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/99      3.84G     0.1015     0.1684    0.03169        521        640: 1\n",
      "tensor([2.09745], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.394       0.21      0.193     0.0944\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/99      3.84G     0.1006     0.1685    0.03116        326        640: 1\n",
      "tensor([1.71003], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.275      0.235      0.211      0.104\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/99      3.84G     0.1003     0.1677    0.03077        498        640: 1\n",
      "tensor([2.00208], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.352      0.237      0.216      0.107\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/99      3.84G     0.1003     0.1665     0.0303        502        640: 1\n",
      "tensor([1.90161], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.319      0.251      0.221      0.107\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/99      3.84G    0.09949     0.1674    0.02983        568        640: 1\n",
      "tensor([2.17906], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.366      0.239      0.225      0.112\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/99      3.84G    0.09936     0.1673    0.02938        572        640: 1\n",
      "tensor([2.20074], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.337      0.258      0.232      0.118\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/99      3.84G    0.09886     0.1652    0.02922        560        640: 1\n",
      "tensor([2.25996], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.334      0.275      0.238      0.121\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/99      3.84G    0.09871     0.1673    0.02894        466        640: 1\n",
      "tensor([1.97896], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.346      0.276      0.244      0.124\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/99      3.84G    0.09838     0.1655    0.02878        709        640: 1\n",
      "tensor([2.34350], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.339      0.276      0.244      0.126\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/99      3.84G    0.09854     0.1665    0.02856        440        640: 1\n",
      "tensor([1.87334], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.329      0.287      0.247      0.125\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/99      3.84G    0.09844     0.1664    0.02836        580        640: 1\n",
      "tensor([1.94125], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.347      0.275      0.248      0.126\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/99      3.84G     0.0978     0.1655    0.02823        503        640: 1\n",
      "tensor([1.96263], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.34      0.288      0.252       0.13\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/99      3.84G    0.09775     0.1661     0.0279        426        640: 1\n",
      "tensor([1.77180], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.345      0.293      0.258      0.133\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/99      3.84G    0.09758      0.163    0.02765        705        640: 1\n",
      "tensor([1.95819], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.358      0.296      0.263      0.135\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/99      3.84G     0.0973      0.163    0.02762        907        640: 1\n",
      "tensor([2.51965], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.356      0.296      0.266      0.137\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/99      3.84G    0.09685     0.1634    0.02742        591        640: 1\n",
      "tensor([2.19410], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.369      0.293      0.271      0.141\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/99      3.84G    0.09699     0.1618    0.02727        567        640: 1\n",
      "tensor([1.92553], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.354      0.297      0.269       0.14\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/99      3.84G    0.09678     0.1618    0.02699        519        640: 1\n",
      "tensor([1.89846], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.364      0.298      0.271       0.14\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/99      3.84G     0.0964     0.1622    0.02688        751        640: 1\n",
      "tensor([2.18272], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.367      0.298      0.275      0.144\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/99      3.84G    0.09645     0.1621    0.02685        335        640: 1\n",
      "tensor([1.64528], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.374      0.299      0.276      0.144\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/99      3.84G    0.09618     0.1617    0.02674        754        640: 1\n",
      "tensor([2.49438], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.368      0.301      0.276      0.144\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/99      3.84G    0.09649     0.1612    0.02661        637        640: 1\n",
      "tensor([1.99767], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.372      0.307      0.278      0.145\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/99      3.84G    0.09676     0.1625    0.02651       1044        640: 1\n",
      "tensor([2.21773], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.379      0.302      0.281      0.148\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/99      3.84G    0.09611     0.1619    0.02642        288        640: 1\n",
      "tensor([1.54053], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.374       0.31      0.282      0.147\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/99      3.84G    0.09582     0.1612    0.02634        628        640: 1\n",
      "tensor([2.09068], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.391      0.308      0.287      0.151\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/99      3.84G    0.09638     0.1623    0.02611        580        640: 1\n",
      "tensor([2.12461], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.388      0.311      0.289      0.151\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/99      3.84G    0.09591      0.162    0.02606        746        640: 1\n",
      "tensor([2.18398], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.385      0.305      0.286      0.151\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/99      3.84G    0.09552     0.1611    0.02609        455        640: 1\n",
      "tensor([1.83393], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.385      0.312       0.29      0.153\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/99      3.84G    0.09585     0.1611    0.02591        505        640: 1\n",
      "tensor([1.90954], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.379       0.31      0.288      0.153\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/99      3.84G    0.09568     0.1609    0.02582        434        640: 1\n",
      "tensor([1.86552], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.384      0.317      0.291      0.152\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/99      3.84G     0.0957     0.1612    0.02573        590        640: 1\n",
      "tensor([2.17912], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.401      0.312      0.295      0.155\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/99      3.84G    0.09505     0.1589    0.02568        686        640: 1\n",
      "tensor([2.15242], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.396      0.314      0.296      0.156\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/99      3.84G    0.09534     0.1594    0.02582        590        640: 1\n",
      "tensor([2.06309], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.393      0.313      0.298      0.157\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/99      3.84G    0.09508     0.1588    0.02538        650        640: 1\n",
      "tensor([2.08652], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.405       0.32      0.303      0.161\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/99      3.84G    0.09511     0.1579    0.02525        679        640: 1\n",
      "tensor([2.05415], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.403      0.317      0.302       0.16\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/99      3.84G    0.09502     0.1598    0.02529        508        640: 1\n",
      "tensor([1.87476], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.408      0.313        0.3       0.16\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/99      3.84G    0.09485       0.16    0.02526        765        640: 1\n",
      "tensor([1.98546], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.41      0.319      0.301       0.16\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/99      3.84G    0.09488     0.1594    0.02514        518        640: 1\n",
      "tensor([2.01104], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.411      0.317      0.302      0.161\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/99      3.84G     0.0947     0.1583    0.02502        589        640: 1\n",
      "tensor([2.01359], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.407       0.32      0.301      0.161\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/99      3.84G    0.09477     0.1587    0.02491        615        640: 1\n",
      "tensor([2.11494], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.41      0.318      0.301       0.16\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/99      3.84G    0.09482     0.1582     0.0249        603        640: 1\n",
      "tensor([2.08930], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.415       0.32      0.304      0.163\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/99      3.84G    0.09515     0.1579    0.02494        606        640: 1\n",
      "tensor([1.99687], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.413      0.323      0.308      0.166\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/99      3.84G    0.09448      0.159    0.02478        549        640: 1\n",
      "tensor([2.02941], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.414       0.32      0.308      0.166\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      50/99      3.84G    0.09469     0.1598     0.0252       1661        640:  ^C\n",
      "      50/99      3.84G    0.09469     0.1598     0.0252       1661        640:  \n"
     ]
    }
   ],
   "source": [
    "# Continue training from the previous incremental checkpoint for 100 epochs,\n",
    "# logging per-class metrics to Comet (COMET_LOG_PER_CLASS_METRICS=true).\n",
    "command = \"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_Lwf.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_VisVOCKITTI.yaml \\\n",
    "--data data/VisDrone_incremental.yaml \\\n",
    "--epochs 100 \\\n",
    "--weights ./runs/train/increment_VOC_plain/weights/last.pt \\\n",
    "--name Vis_Voc_Kitti_base \\\n",
    "\"\"\"\n",
    "!{command}\n",
    "# started at 18:58"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4283cce9-9712-4ba7-b76d-c667bc4c0f65",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "3bb81a3a-ba1d-4290-96b3-fe0007c5d007",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/VisDrone_incremental.yaml, weights=['runs/train/Vis_Voc_Kitti_base/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VisVOCKITTI summary: 160 layers, 7088344 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-test-dev/labe\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1610      75102      0.354      0.282      0.247      0.129\n",
      "                   car       1610      28074      0.522      0.677      0.642      0.373\n",
      "                   van       1610       5771      0.305      0.367      0.279      0.167\n",
      "                 truck       1610       2659      0.314      0.392      0.292      0.159\n",
      "                person       1610       6376      0.344      0.132      0.121     0.0355\n",
      "               bicycle       1610       1302      0.218     0.0653     0.0618     0.0212\n",
      "                   bus       1610       2940      0.577      0.489      0.506      0.311\n",
      "             motorbike       1610       5845      0.333       0.25      0.187     0.0645\n",
      "            pedestrian       1610      21006      0.375      0.237      0.224     0.0823\n",
      "              tricycle       1610        530      0.219      0.121     0.0708     0.0328\n",
      "       awning-tricycle       1610        599      0.334     0.0851     0.0919     0.0461\n",
      "Speed: 0.1ms pre-process, 4.9ms inference, 7.2ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp206\u001b[0m\n",
      "Vis\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_Voc_VisDrone.yaml, weights=['runs/train/Vis_Voc_Kitti_base/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VisVOCKITTI summary: 160 layers, 7088344 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 image\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.789     0.0172     0.0108    0.00504\n",
      "                   car       4952       1201      0.188      0.224     0.0949     0.0501\n",
      "                person       4952       4528       0.35     0.0384     0.0424     0.0132\n",
      "             aeroplane       4952        285          1          0          0          0\n",
      "               bicycle       4952        337      0.136     0.0178    0.00849    0.00266\n",
      "                  bird       4952        459          1          0          0          0\n",
      "                  boat       4952        263          1          0          0          0\n",
      "                bottle       4952        469          1          0    0.00512    0.00256\n",
      "                   bus       4952        213     0.0921      0.061     0.0191    0.00906\n",
      "                   cat       4952        358          1          0          0          0\n",
      "                 chair       4952        756          1          0          0          0\n",
      "                   cow       4952        244          1          0          0          0\n",
      "           diningtable       4952        206          1          0          0          0\n",
      "                   dog       4952        489          1          0     0.0125    0.00761\n",
      "                 horse       4952        348          1          0          0          0\n",
      "             motorbike       4952        325     0.0088    0.00308     0.0016   0.000655\n",
      "           pottedplant       4952        480          1          0          0          0\n",
      "                 sheep       4952        242          1          0          0          0\n",
      "                  sofa       4952        239          1          0    0.00738    0.00516\n",
      "                 train       4952        282          1          0          0          0\n",
      "             tvmonitor       4952        308          1          0     0.0245     0.0097\n",
      "Speed: 0.1ms pre-process, 1.4ms inference, 0.8ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp207\u001b[0m\n",
      "Voc\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_kitti_VisDrone.yaml, weights=['runs/train/Vis_Voc_Kitti_base/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VisVOCKITTI summary: 160 layers, 7088344 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.659      0.123      0.104     0.0508\n",
      "                   car       2244       8711       0.64      0.597      0.621       0.31\n",
      "                   van       2244        861      0.175     0.0848     0.0719     0.0311\n",
      "                 truck       2244        333      0.171      0.297      0.112     0.0583\n",
      "                  tram       2244        138          1          0          0          0\n",
      "                person       2244       1286      0.286    0.00544     0.0265    0.00769\n",
      "        person_sitting       2244         89          1          0          0          0\n",
      "               cyclist       2244        496          1          0          0          0\n",
      "                  misc       2244        284          1          0          0          0\n",
      "Speed: 0.1ms pre-process, 1.0ms inference, 1.1ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp208\u001b[0m\n",
      "kitti\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the final checkpoint on the test split of each dataset.\n",
    "# The trailing `echo` tags each report so the logs above it are attributable.\n",
    "# NOTE: previously the same val.py invocation was copy-pasted three times;\n",
    "# a (data_yaml, label) loop removes the duplication without changing behavior.\n",
    "model = 'runs/train/Vis_Voc_Kitti_base/weights/last.pt'\n",
    "\n",
    "datasets = [\n",
    "    ('data/VisDrone_incremental.yaml', 'Vis'),\n",
    "    ('data/val_Voc_VisDrone.yaml', 'Voc'),\n",
    "    ('data/val_kitti_VisDrone.yaml', 'kitti'),\n",
    "]\n",
    "\n",
    "for data_yaml, label in datasets:\n",
    "    val_command = f\"python val.py --data {data_yaml} --weights {model} --task test && echo '{label}'\"\n",
    "    !{val_command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1b7aa52b-f854-4681-ae66-9dbe9df9e7f2",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "977d9301-5261-4a43-b69c-446ad90b1604",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9e691524-717f-4f47-a424-14c7955f4a6d",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c2058404-1034-4b5a-8bf5-090c9ffd5153",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9563f77a-ad84-4a34-a208-e83e4ba40bfd",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ac171bc4-7a3e-4ff8-90fa-bc0c7479649a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 1e-4的baseline"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "003ee3f2-03e7-4b5e-abde-1ddbcc919cba",
   "metadata": {},
   "outputs": [],
   "source": [
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_Lwf.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_VisVOCKITTI.yaml \\\n",
    "--data data/VisDrone_incremental.yaml \\\n",
    "--epochs 100 \\\n",
    "--weights ./runs/train/increment_VOC_Lwf/weights/last.pt \\\n",
    "--name Vis_Voc_Kitti_lwf_base \\\n",
    "\"\"\"\n",
    "!{command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "02789044-866f-4fac-9f4c-7b2052090418",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/VisDrone_incremental.yaml, weights=['runs/train/Vis_Voc_Kitti_lwf_base/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VisVOCKITTI summary: 160 layers, 7088344 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-test-dev/labe\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1610      75102       0.36      0.278      0.247      0.129\n",
      "                   car       1610      28074      0.533      0.668      0.636      0.368\n",
      "                   van       1610       5771       0.31      0.373      0.282      0.168\n",
      "                 truck       1610       2659      0.343      0.354       0.29      0.158\n",
      "                person       1610       6376      0.341      0.132       0.11     0.0339\n",
      "               bicycle       1610       1302      0.164     0.0607      0.056     0.0185\n",
      "                   bus       1610       2940      0.583      0.501      0.513      0.316\n",
      "             motorbike       1610       5845      0.358      0.244       0.19     0.0649\n",
      "            pedestrian       1610      21006      0.383      0.231      0.219     0.0809\n",
      "              tricycle       1610        530      0.218      0.117     0.0762      0.033\n",
      "       awning-tricycle       1610        599      0.369     0.0985      0.102      0.052\n",
      "Speed: 0.1ms pre-process, 4.5ms inference, 9.2ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp209\u001b[0m\n",
      "Vis\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_Voc_VisDrone.yaml, weights=['runs/train/Vis_Voc_Kitti_lwf_base/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VisVOCKITTI summary: 160 layers, 7088344 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 image\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.767     0.0181    0.00715    0.00385\n",
      "                   car       4952       1201     0.0992      0.207     0.0554      0.029\n",
      "                person       4952       4528      0.133     0.0409     0.0229    0.00652\n",
      "             aeroplane       4952        285          1          0          0          0\n",
      "               bicycle       4952        337     0.0437     0.0208    0.00801    0.00272\n",
      "                  bird       4952        459          1          0          0          0\n",
      "                  boat       4952        263          1          0          0          0\n",
      "                bottle       4952        469          1          0          0          0\n",
      "                   bus       4952        213     0.0479     0.0845     0.0334     0.0212\n",
      "                   cat       4952        358          1          0          0          0\n",
      "                 chair       4952        756          1          0          0          0\n",
      "                   cow       4952        244          1          0          0          0\n",
      "           diningtable       4952        206          1          0          0          0\n",
      "                   dog       4952        489          1          0          0          0\n",
      "                 horse       4952        348          1          0          0          0\n",
      "             motorbike       4952        325     0.0129    0.00923    0.00102    0.00033\n",
      "           pottedplant       4952        480          1          0          0          0\n",
      "                 sheep       4952        242          1          0     0.0141     0.0113\n",
      "                  sofa       4952        239          1          0          0          0\n",
      "                 train       4952        282          1          0          0          0\n",
      "             tvmonitor       4952        308          1          0    0.00813    0.00598\n",
      "Speed: 0.1ms pre-process, 1.4ms inference, 0.9ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp210\u001b[0m\n",
      "Voc\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_kitti_VisDrone.yaml, weights=['runs/train/Vis_Voc_Kitti_lwf_base/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VisVOCKITTI summary: 160 layers, 7088344 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.637       0.13     0.0972     0.0462\n",
      "                   car       2244       8711      0.565      0.611      0.593      0.286\n",
      "                   van       2244        861      0.173     0.0929     0.0673     0.0298\n",
      "                 truck       2244        333      0.154      0.321     0.0914     0.0459\n",
      "                  tram       2244        138          1          0          0          0\n",
      "                person       2244       1286      0.209     0.0166     0.0263     0.0079\n",
      "        person_sitting       2244         89          1          0          0          0\n",
      "               cyclist       2244        496          1          0          0          0\n",
      "                  misc       2244        284          1          0          0          0\n",
      "Speed: 0.1ms pre-process, 0.9ms inference, 1.1ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp211\u001b[0m\n",
      "kitti\n"
     ]
    }
   ],
   "source": [
    "# Vis\n",
    "model = f'runs/train/Vis_Voc_Kitti_lwf_base/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/VisDrone_incremental.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Vis' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "# Voc\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/val_Voc_VisDrone.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Voc' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "# kitti\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/val_kitti_VisDrone.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'kitti' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c4871514-706d-49cd-8bf7-a175134adf52",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6fdedea8-6234-4d32-a9ad-68ce28ca1c9d",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b71bd734-c0b5-478c-a143-b46215555b0f",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "00421c40-d49a-45d6-860a-525d06d985e5",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_Lwf: \u001b[0mweights=./runs/train/increment_VOC_Lwf/weights/last.pt, cfg=models/yolov5s_VisVOCKITTI.yaml, data=data/VisDrone_incremental.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=50, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=Vis_Voc_Kitti_lwf_1e-4, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=True, Lwf_lambda=0.0001, Lwf_temperature=1.0\n",
      "Command 'git fetch ultralytics' timed out after 5 seconds\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/exp-100epoch/bc292970f9d44a5da7a582a1243761d0\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     91704  models.yolo.Detect                      [29, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VisVOCKITTI summary: 217 layers, 7097848 parameters, 7097848 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/355 items from runs/train/increment_VOC_Lwf/weights/last.pt\n",
      "Overriding model.yaml nc=29 with nc=26\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83613  models.yolo.Detect                      [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VisVOCKITTI summary: 217 layers, 7089757 parameters, 7089757 gradients, 16.2 GFLOPs\n",
      "\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 66 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/labels\u001b[0m\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/0000137_02220_d_0000163.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/0000140_00118_d_0000002.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/9999945_00000_d_0000114.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/9999987_00000_d_0000049.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-val/labels.cac\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m2.95 anchors/target, 0.933 Best Possible Recall (BPR). Anchors are a poor fit to dataset ⚠️, attempting to improve...\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mWARNING ⚠️ Extremely small objects found: 29644 of 343201 labels are <3 pixels in size\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mRunning kmeans for 9 anchors on 342304 points...\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mEvolving anchors with Genetic Algorithm: fitness = 0.7493: 100%|████\u001b[0m\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mthr=0.25: 0.9995 best possible recall, 5.74 anchors past thr\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mn=9, img_size=640, metric_all=0.364/0.748-mean/best, past_thr=0.485-mean: 3,5, 4,9, 8,7, 8,15, 16,9, 16,21, 33,17, 29,37, 61,63\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mDone ✅ (optional: update model *.yaml to use these anchors in the future)\n",
      "Plotting labels to runs/train/Vis_Voc_Kitti_lwf_1e-4/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/Vis_Voc_Kitti_lwf_1e-4\u001b[0m\n",
      "Starting training for 50 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/49      3.51G     0.1337     0.1303     0.0652        431        640: 1\n",
      "tensor([3.22671], device='cuda:0', grad_fn=<AddBackward0>) tensor(10832.62305, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   fatal: unable to access 'https://github.com/ultralytics/yolov5/': Failed to connect to github.com port 443 after 130770 ms: Connection timed out\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.349      0.094     0.0507     0.0202\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/49      3.71G     0.1152     0.1616    0.05077        589        640: 1\n",
      "tensor([3.01396], device='cuda:0', grad_fn=<AddBackward0>) tensor(5595.82080, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.216      0.139     0.0801      0.033\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/49      3.71G     0.1107       0.17    0.04719        586        640: 1\n",
      "tensor([3.03376], device='cuda:0', grad_fn=<AddBackward0>) tensor(5129.71387, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.335      0.173      0.102     0.0448\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/49      3.71G     0.1073     0.1703    0.04488        785        640: 1\n",
      "tensor([2.79755], device='cuda:0', grad_fn=<AddBackward0>) tensor(4766.81348, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.354       0.19      0.128     0.0592\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/49      3.71G      0.105     0.1715    0.04336        417        640: 1\n",
      "tensor([2.65859], device='cuda:0', grad_fn=<AddBackward0>) tensor(5100.36377, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.394      0.197      0.145     0.0684\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/49      3.71G     0.1035     0.1702    0.04224        276        640: 1\n",
      "tensor([2.16217], device='cuda:0', grad_fn=<AddBackward0>) tensor(4175.98145, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.382      0.205      0.154      0.072\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/49      3.71G     0.1029     0.1692     0.0414        436        640: 1\n",
      "tensor([2.55537], device='cuda:0', grad_fn=<AddBackward0>) tensor(4583.53662, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.349      0.209      0.166     0.0788\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/49      3.71G     0.1024     0.1692    0.04079        521        640: 1\n",
      "tensor([2.58878], device='cuda:0', grad_fn=<AddBackward0>) tensor(4181.82031, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.322      0.211      0.174     0.0834\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/49      3.71G     0.1016     0.1693    0.04024        326        640: 1\n",
      "tensor([2.29110], device='cuda:0', grad_fn=<AddBackward0>) tensor(4959.29102, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.257       0.22      0.181     0.0871\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/49      3.71G     0.1012     0.1683    0.03982        498        640: 1\n",
      "tensor([2.55217], device='cuda:0', grad_fn=<AddBackward0>) tensor(4751.34375, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.354      0.215      0.184     0.0894\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/49      3.71G     0.1013     0.1674     0.0393        502        640: 1\n",
      "tensor([2.41323], device='cuda:0', grad_fn=<AddBackward0>) tensor(4463.88867, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.282      0.218      0.189     0.0912\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/49      3.71G     0.1005     0.1686    0.03898        568        640: 1\n",
      "tensor([2.75178], device='cuda:0', grad_fn=<AddBackward0>) tensor(4797.97412, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.288      0.226      0.194     0.0943\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/49      3.71G     0.1003     0.1685    0.03852        572        640: 1\n",
      "tensor([2.85819], device='cuda:0', grad_fn=<AddBackward0>) tensor(5391.39014, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.264       0.23      0.193     0.0948\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/49      3.71G    0.09987     0.1663    0.03833        560        640: 1\n",
      "tensor([2.85063], device='cuda:0', grad_fn=<AddBackward0>) tensor(5225.45898, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.272       0.24      0.198     0.0982\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/49      3.71G    0.09972     0.1682    0.03819        466        640: 1\n",
      "tensor([2.63064], device='cuda:0', grad_fn=<AddBackward0>) tensor(5830.74023, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.288      0.232      0.204      0.101\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/49      3.71G    0.09945     0.1666     0.0379        709        640: 1\n",
      "tensor([2.96236], device='cuda:0', grad_fn=<AddBackward0>) tensor(5170.83350, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.294      0.242      0.206      0.103\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/49      3.71G    0.09957     0.1679    0.03775        440        640: 1\n",
      "tensor([2.44725], device='cuda:0', grad_fn=<AddBackward0>) tensor(5098.98389, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.283      0.242      0.206      0.104\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/49      3.71G    0.09951     0.1677     0.0375        580        640: 1\n",
      "tensor([2.58366], device='cuda:0', grad_fn=<AddBackward0>) tensor(5290.05420, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.302      0.244      0.211      0.106\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/49      3.71G    0.09885     0.1667    0.03746        503        640: 1\n",
      "tensor([2.50476], device='cuda:0', grad_fn=<AddBackward0>) tensor(4481.05762, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.311      0.249      0.219      0.111\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/49      3.71G    0.09887     0.1673    0.03716        426        640: 1\n",
      "tensor([2.37879], device='cuda:0', grad_fn=<AddBackward0>) tensor(5030.45508, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.297      0.243      0.213      0.108\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/49      3.71G    0.09862     0.1642    0.03688        705        640: 1\n",
      "tensor([2.60869], device='cuda:0', grad_fn=<AddBackward0>) tensor(5390.91992, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.308      0.246       0.22      0.112\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/49      3.71G     0.0983     0.1642    0.03685        907        640: 1\n",
      "tensor([3.24887], device='cuda:0', grad_fn=<AddBackward0>) tensor(5434.84521, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.313      0.255      0.221      0.113\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/49      3.71G    0.09792     0.1646    0.03662        591        640: 1\n",
      "tensor([2.74040], device='cuda:0', grad_fn=<AddBackward0>) tensor(4867.49072, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.317      0.257      0.224      0.115\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/49      3.71G     0.0981     0.1631    0.03649        567        640: 1\n",
      "tensor([2.48706], device='cuda:0', grad_fn=<AddBackward0>) tensor(4719.27783, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.31      0.259      0.229      0.117\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/49      3.71G    0.09786     0.1631    0.03625        519        640: 1\n",
      "tensor([2.41576], device='cuda:0', grad_fn=<AddBackward0>) tensor(4491.61816, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.316      0.256      0.229      0.118\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/49      3.71G    0.09754     0.1633    0.03627        751        640: 1\n",
      "tensor([2.77159], device='cuda:0', grad_fn=<AddBackward0>) tensor(5076.75195, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.323      0.252      0.227      0.117\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/49      3.71G    0.09755     0.1631     0.0362        335        640: 1\n",
      "tensor([2.17232], device='cuda:0', grad_fn=<AddBackward0>) tensor(4707.42139, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.316      0.259      0.227      0.118\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/49      3.71G    0.09725     0.1629    0.03608        754        640: 1\n",
      "tensor([3.09525], device='cuda:0', grad_fn=<AddBackward0>) tensor(5163.46924, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.32      0.263      0.229      0.119\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/49      3.71G     0.0976     0.1622    0.03593        637        640: 1\n",
      "tensor([2.55754], device='cuda:0', grad_fn=<AddBackward0>) tensor(4871.15674, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.33      0.258       0.23      0.119\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/49      3.71G    0.09786     0.1635    0.03588       1044        640: 1\n",
      "tensor([2.75275], device='cuda:0', grad_fn=<AddBackward0>) tensor(4809.85303, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.317      0.262      0.231       0.12\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/49      3.71G    0.09722     0.1628    0.03579        288        640: 1\n",
      "tensor([2.06976], device='cuda:0', grad_fn=<AddBackward0>) tensor(4401.70947, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.319      0.263      0.233      0.121\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/49      3.71G    0.09693     0.1622    0.03568        628        640: 1\n",
      "tensor([2.62857], device='cuda:0', grad_fn=<AddBackward0>) tensor(4516.02490, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.315      0.265      0.233      0.121\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/49      3.71G    0.09745     0.1631    0.03546        580        640: 1\n",
      "tensor([2.64264], device='cuda:0', grad_fn=<AddBackward0>) tensor(4745.14307, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.33      0.263      0.236      0.123\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/49      3.71G    0.09708      0.163    0.03548        746        640: 1\n",
      "tensor([2.73923], device='cuda:0', grad_fn=<AddBackward0>) tensor(4893.97900, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.322      0.269      0.237      0.123\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/49      3.71G    0.09666     0.1622    0.03549        455        640: 1\n",
      "tensor([2.34730], device='cuda:0', grad_fn=<AddBackward0>) tensor(4160.41602, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.328      0.268      0.238      0.124\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/49      3.71G    0.09697     0.1619    0.03537        505        640: 1\n",
      "tensor([2.49837], device='cuda:0', grad_fn=<AddBackward0>) tensor(5233.59717, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.322      0.269      0.239      0.125\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/49      3.71G    0.09684     0.1618    0.03528        434        640: 1\n",
      "tensor([2.43761], device='cuda:0', grad_fn=<AddBackward0>) tensor(4867.51318, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.325      0.268      0.239      0.124\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/49      3.71G    0.09689      0.162    0.03516        590        640: 1\n",
      "tensor([2.76835], device='cuda:0', grad_fn=<AddBackward0>) tensor(4672.68945, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.328      0.275      0.243      0.127\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/49      3.71G    0.09622     0.1597    0.03514        686        640: 1\n",
      "tensor([2.71962], device='cuda:0', grad_fn=<AddBackward0>) tensor(4760.31885, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.333      0.272      0.244      0.128\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/49      3.71G    0.09649     0.1601    0.03517        590        640: 1\n",
      "tensor([2.61589], device='cuda:0', grad_fn=<AddBackward0>) tensor(5089.07471, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.335       0.27      0.243      0.127\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/49      3.71G    0.09625     0.1595    0.03489        650        640: 1\n",
      "tensor([2.63631], device='cuda:0', grad_fn=<AddBackward0>) tensor(4442.57324, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.335      0.273      0.245       0.13\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/49      3.71G    0.09633     0.1586    0.03475        679        640: 1\n",
      "tensor([2.63173], device='cuda:0', grad_fn=<AddBackward0>) tensor(5151.73438, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.327      0.271      0.241      0.126\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/49      3.71G    0.09625     0.1607    0.03486        508        640: 1\n",
      "tensor([2.39051], device='cuda:0', grad_fn=<AddBackward0>) tensor(4091.34961, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.326      0.273      0.243      0.128\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/49      3.71G    0.09609     0.1606    0.03477        765        640: 1\n",
      "tensor([2.50160], device='cuda:0', grad_fn=<AddBackward0>) tensor(4488.14746, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.327      0.275      0.242      0.128\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/49      3.71G    0.09616     0.1603    0.03474        518        640: 1\n",
      "tensor([2.59993], device='cuda:0', grad_fn=<AddBackward0>) tensor(4786.58057, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.325      0.277      0.243      0.128\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/49      3.71G    0.09596     0.1589    0.03457        589        640: 1\n",
      "tensor([2.60546], device='cuda:0', grad_fn=<AddBackward0>) tensor(5060.57910, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.326      0.273      0.244      0.129\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/49      3.71G    0.09601     0.1595    0.03445        615        640: 1\n",
      "tensor([2.68570], device='cuda:0', grad_fn=<AddBackward0>) tensor(4853.20361, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.328      0.276      0.244      0.129\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/49      3.71G    0.09611     0.1589     0.0345        603        640: 1\n",
      "tensor([2.64702], device='cuda:0', grad_fn=<AddBackward0>) tensor(4838.83496, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.324      0.276      0.245       0.13\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/49      3.71G    0.09641     0.1585    0.03454        606        640: 1\n",
      "tensor([2.47752], device='cuda:0', grad_fn=<AddBackward0>) tensor(4229.44531, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.329      0.275      0.247      0.131\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/49      3.71G    0.09578     0.1596    0.03441        549        640: 1\n",
      "tensor([2.61922], device='cuda:0', grad_fn=<AddBackward0>) tensor(5204.55859, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.325      0.276      0.246      0.131\n",
      "\n",
      "50 epochs completed in 0.852 hours.\n",
      "Optimizer stripped from runs/train/Vis_Voc_Kitti_lwf_1e-4/weights/last.pt, 14.5MB\n",
      "Optimizer stripped from runs/train/Vis_Voc_Kitti_lwf_1e-4/weights/best.pt, 14.5MB\n",
      "\n",
      "Validating runs/train/Vis_Voc_Kitti_lwf_1e-4/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_VisVOCKITTI summary: 160 layers, 7088344 parameters, 0 gradients, 16.0 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.321      0.259      0.233      0.125\n",
      "                   car        548      14064      0.544      0.669      0.652      0.416\n",
      "                   van        548       1975      0.229      0.314      0.209      0.139\n",
      "                 truck        548        750      0.355      0.205      0.217       0.13\n",
      "                person        548       5125      0.363      0.216        0.2     0.0665\n",
      "               bicycle        548       1287      0.117      0.049     0.0401     0.0141\n",
      "                   bus        548        251      0.286      0.355      0.282      0.173\n",
      "             motorbike        548       4886      0.415      0.255      0.228     0.0815\n",
      "            pedestrian        548       8844      0.317      0.374      0.329      0.133\n",
      "              tricycle        548       1045      0.317     0.0842      0.101     0.0513\n",
      "       awning-tricycle        548        532      0.264     0.0655     0.0754     0.0492\n",
      "Results saved to \u001b[1mruns/train/Vis_Voc_Kitti_lwf_1e-4\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m The process of logging environment details (conda environment, git patch) is underway. Please be patient as this may take some time.\n",
      "\u001b[1;38;5;214mCOMET WARNING:\u001b[0m Failed to complete logging of all environment details (conda environment, git patch)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : Vis_Voc_Kitti_lwf_1e-4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/exp-100epoch/bc292970f9d44a5da7a582a1243761d0\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_f1              : 0.10502660667438547\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_false_positives : 97.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_mAP@.5          : 0.07536407228401423\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_mAP@.5:.95      : 0.04917614981303971\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_precision       : 0.26437573846171675\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_recall          : 0.0655295184381057\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_support         : 532\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_true_positives  : 35.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_f1                      : 0.06901516183920543\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_false_positives         : 476.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5                  : 0.04006735154699224\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5:.95              : 0.014090584498061363\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_precision               : 0.11695129446058813\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_recall                  : 0.04895104895104895\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_support                 : 1287\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_true_positives          : 63.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_f1                          : 0.3167513236324294\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_false_positives             : 222.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5                      : 0.282370058356805\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5:.95                  : 0.17287767042373936\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_precision                   : 0.2862150183434283\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_recall                      : 0.3545816733067729\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_support                     : 251\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_true_positives              : 89.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_f1                          : 0.5996102273539237\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_false_positives             : 7894.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5                      : 0.6524959914690136\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5:.95                  : 0.4162449694798892\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_precision                   : 0.5435821919723897\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_recall                      : 0.6685153583617748\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_support                     : 14064\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_true_positives              : 9402.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [2005]                     : (3.9943480491638184, 9.212278366088867)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [100]           : (0.050719297241633876, 0.24668590374450403)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [100]      : (0.02021911459615143, 0.13081993875784037)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [100]         : (0.2156059006183587, 0.3942631834139522)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [100]            : (0.09404562778168743, 0.2770807257422127)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_f1                    : 0.3156410160648712\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_false_positives       : 1752.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_mAP@.5                : 0.22823884194716754\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_mAP@.5:.95            : 0.08150443028872276\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_precision             : 0.4151687032792437\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_recall                : 0.2546049938600082\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_support               : 4886\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_true_positives        : 1244.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_f1                   : 0.34338685645655775\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_false_positives      : 7129.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_mAP@.5               : 0.328866669763464\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_mAP@.5:.95           : 0.13263549516869685\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_precision            : 0.31713428305111513\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_recall               : 0.3743781094527363\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_support              : 8844\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_true_positives       : 3311.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_f1                       : 0.27080507067689596\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_false_positives          : 1945.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5                   : 0.19988381276652267\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5:.95               : 0.06650244114711971\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_precision                : 0.3626718881764484\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_recall                   : 0.21607265802387754\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_support                  : 5125\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_true_positives           : 1107.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [100]            : (0.09577766060829163, 0.13367776572704315)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [100]            : (0.03441421687602997, 0.06519909203052521)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [100]            : (0.1303074210882187, 0.17147532105445862)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_f1                     : 0.1330977470667731\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_false_positives        : 189.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_mAP@.5                 : 0.10125595216605768\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_mAP@.5:.95             : 0.051321034559242376\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_precision              : 0.3173041467304252\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_recall                 : 0.08421052631578947\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_support                : 1045\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_true_positives         : 88.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_f1                        : 0.2602732220808375\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_false_positives           : 279.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5                    : 0.2173023252235058\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5:.95                : 0.13038362555775362\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_precision                 : 0.35535304357475256\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_recall                    : 0.20533333333333334\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_support                   : 750\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_true_positives            : 154.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [100]              : (0.09355079382658005, 0.1144498884677887)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [100]              : (0.037547461688518524, 0.05420192703604698)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [100]              : (0.2191552072763443, 0.2397325336933136)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_f1                          : 0.26470995989667884\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_false_positives             : 2089.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5                      : 0.20854336762064526\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5:.95                  : 0.13883524532205654\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_precision                   : 0.22883529265290758\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_recall                      : 0.3139240506329114\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_support                     : 1975\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_true_positives              : 620.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [100]                     : (0.0004960000000000005, 0.07007407407407407)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [100]                     : (0.0004960000000000005, 0.00959609547325103)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [100]                     : (0.0004960000000000005, 0.00959609547325103)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : Vis_Voc_Kitti_lwf_1e-4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/exp-100epoch/bc292970f9d44a5da7a582a1243761d0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.18125\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/Vis_Voc_Kitti_lwf_1e-4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (2.11 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     model graph                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n",
      "\u001b[1;38;5;214mCOMET WARNING:\u001b[0m Failed to log run in comet.com\n"
     ]
    }
   ],
   "source": [
    "# Incremental training with LwF (Learning without Forgetting) on top of the\n",
    "# model that was itself trained with LwF on the VOC increment.\n",
    "# Note: plain string (not f-string) — there are no {placeholders} to interpolate.\n",
    "command = \"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_Lwf.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_VisVOCKITTI.yaml \\\n",
    "--data data/VisDrone_incremental.yaml \\\n",
    "--epochs 50 \\\n",
    "--weights ./runs/train/increment_VOC_Lwf/weights/last.pt \\\n",
    "--name Vis_Voc_Kitti_lwf_1e-4 \\\n",
    "--Lwf_enable \\\n",
    "--Lwf_temperature 1.0 \\\n",
    "--Lwf_lambda 1e-4\n",
    "\"\"\"\n",
    "!{command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "c7f2deb8-2d75-4489-b8d5-ad4168005fbc",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/VisDrone_incremental.yaml, weights=['runs/train/Vis_Voc_Kitti_lwf_1e-4/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VisVOCKITTI summary: 160 layers, 7088344 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-test-dev/labe\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1610      75102      0.285      0.236      0.193      0.101\n",
      "                   car       1610      28074      0.502      0.627      0.593       0.34\n",
      "                   van       1610       5771      0.225      0.335      0.205      0.121\n",
      "                 truck       1610       2659      0.204      0.293      0.152     0.0767\n",
      "                person       1610       6376      0.216      0.101     0.0664     0.0188\n",
      "               bicycle       1610       1302      0.154     0.0341     0.0308     0.0122\n",
      "                   bus       1610       2940      0.538      0.438      0.432      0.264\n",
      "             motorbike       1610       5845       0.33      0.175      0.138     0.0474\n",
      "            pedestrian       1610      21006      0.274      0.228      0.199     0.0745\n",
      "              tricycle       1610        530      0.165     0.0868       0.06     0.0274\n",
      "       awning-tricycle       1610        599      0.239     0.0484      0.056     0.0264\n",
      "Speed: 0.1ms pre-process, 4.0ms inference, 21.9ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp212\u001b[0m\n",
      "Vis\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_Voc_VisDrone.yaml, weights=['runs/train/Vis_Voc_Kitti_lwf_1e-4/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VisVOCKITTI summary: 160 layers, 7088344 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 image\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.199     0.0957     0.0688     0.0319\n",
      "                   car       4952       1201      0.192      0.446      0.333      0.189\n",
      "                person       4952       4528      0.427       0.25      0.206     0.0691\n",
      "             aeroplane       4952        285      0.182     0.0413      0.019    0.00693\n",
      "               bicycle       4952        337      0.176      0.175     0.0969      0.032\n",
      "                  bird       4952        459      0.279     0.0632     0.0508     0.0282\n",
      "                  boat       4952        263     0.0728     0.0266     0.0104    0.00356\n",
      "                bottle       4952        469      0.167     0.0917     0.0324     0.0102\n",
      "                   bus       4952        213      0.107      0.188     0.0948     0.0489\n",
      "                   cat       4952        358     0.0608    0.00838    0.00289    0.00095\n",
      "                 chair       4952        756      0.148     0.0714     0.0417     0.0202\n",
      "                   cow       4952        244      0.437     0.0246     0.0743     0.0325\n",
      "           diningtable       4952        206          0          0   0.000147   3.19e-05\n",
      "                   dog       4952        489     0.0956     0.0245     0.0142    0.00618\n",
      "                 horse       4952        348     0.0732     0.0287    0.00764    0.00289\n",
      "             motorbike       4952        325     0.0963      0.105     0.0556     0.0216\n",
      "           pottedplant       4952        480      0.132     0.0271      0.019    0.00688\n",
      "                 sheep       4952        242      0.524      0.141      0.176     0.0949\n",
      "                  sofa       4952        239      0.193     0.0418     0.0312     0.0161\n",
      "                 train       4952        282      0.302     0.0369     0.0293     0.0141\n",
      "             tvmonitor       4952        308      0.309      0.123     0.0813     0.0343\n",
      "Speed: 0.1ms pre-process, 1.3ms inference, 1.0ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp213\u001b[0m\n",
      "Voc\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_kitti_VisDrone.yaml, weights=['runs/train/Vis_Voc_Kitti_lwf_1e-4/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VisVOCKITTI summary: 160 layers, 7088344 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.359      0.207      0.177     0.0867\n",
      "                   car       2244       8711      0.495      0.782      0.755      0.418\n",
      "                   van       2244        861      0.264      0.297      0.189      0.104\n",
      "                 truck       2244        333      0.154      0.414      0.147     0.0744\n",
      "                  tram       2244        138          0          0     0.0229    0.00696\n",
      "                person       2244       1286      0.467      0.151        0.2     0.0591\n",
      "        person_sitting       2244         89          0          0      0.015     0.0026\n",
      "               cyclist       2244        496      0.494    0.00403     0.0413     0.0117\n",
      "                  misc       2244        284          1    0.00694     0.0504     0.0166\n",
      "Speed: 0.1ms pre-process, 1.0ms inference, 1.1ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp214\u001b[0m\n",
      "kitti\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the final LwF model on the test split of each task, in training\n",
    "# order: VisDrone, VOC, KITTI. The trailing `echo` labels which dataset the\n",
    "# preceding results table belongs to.\n",
    "model = 'runs/train/Vis_Voc_Kitti_lwf_1e-4/weights/last.pt'\n",
    "\n",
    "# (data yaml, label echoed after a successful run)\n",
    "datasets = [\n",
    "    ('data/VisDrone_incremental.yaml', 'Vis'),\n",
    "    ('data/val_Voc_VisDrone.yaml', 'Voc'),\n",
    "    ('data/val_kitti_VisDrone.yaml', 'kitti'),\n",
    "]\n",
    "\n",
    "for data_yaml, label in datasets:\n",
    "    val_command = (\n",
    "        f\"python val.py \"\n",
    "        f\"--data {data_yaml} \"\n",
    "        f\"--weights {model} \"\n",
    "        f\"--task test && echo '{label}'\"\n",
    "    )\n",
    "    !{val_command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ac825a35-1278-4b14-b459-63ca13488406",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d26573bf-d508-4012-999a-f128f2fa749f",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
