{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "11b6a1ee-74b9-456e-853a-6d9d472d114f",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "YOLOv5 🚀 b2fc1a21 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Setup complete ✅ (112 CPUs, 503.5 GB RAM, 28.1/30.0 GB disk)\n"
     ]
    }
   ],
   "source": [
    "import comet_ml\n",
    "import torch\n",
    "import utils\n",
    "\n",
    "comet_ml.init(project_name='exp_100epoch')\n",
    "# 这里应该会包含100epoch的0,0.6,1.2加雾以及各个以100epoch为单位的增量\n",
    "display = utils.notebook_init()  # checks"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "10e1ac5d-f061-4f01-b9e2-7b01ef24aab0",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_DER: \u001b[0mweights=yolov5s.pt, cfg=models/yolov5s_openimages.yaml, data=data/VisDrone_incremental.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=50, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=baseline_VisDrone, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=False, Lwf_lambda=[], Lwf_temperature=1.0, Old_models=[], DER_enable=False, DER_old_model=[]\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2895 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 b2fc1a21 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/exp-100epoch/aacee3ec2ffd4892bf5d4a9f3d9662a6\u001b[0m\n",
      "\n",
      "extractors长度： 0\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1    110583  models.yolo_DerTest.Detect              [36, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7116727 parameters, 7116727 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/355 items from yolov5s.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 66 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/labels\u001b[0m\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/0000137_02220_d_0000163.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/0000140_00118_d_0000002.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/9999945_00000_d_0000114.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/9999987_00000_d_0000049.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-val/labels.cac\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m2.95 anchors/target, 0.933 Best Possible Recall (BPR). Anchors are a poor fit to dataset ⚠️, attempting to improve...\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mWARNING ⚠️ Extremely small objects found: 29644 of 343201 labels are <3 pixels in size\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mRunning kmeans for 9 anchors on 342304 points...\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mEvolving anchors with Genetic Algorithm: fitness = 0.7493: 100%|████\u001b[0m\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mthr=0.25: 0.9995 best possible recall, 5.74 anchors past thr\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mn=9, img_size=640, metric_all=0.364/0.748-mean/best, past_thr=0.485-mean: 3,5, 4,9, 8,7, 8,15, 16,9, 16,21, 33,17, 29,37, 61,63\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mDone ✅ (optional: update model *.yaml to use these anchors in the future)\n",
      "Plotting labels to runs/train/baseline_VisDrone/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/baseline_VisDrone\u001b[0m\n",
      "Starting training for 50 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/49      3.65G     0.1273     0.1377     0.0614        431        640: 1\n",
      "tensor([2.06185], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.165      0.151     0.0787      0.032\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/49      3.85G       0.11     0.1687    0.04068        589        640: 1\n",
      "tensor([2.38733], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.315      0.185      0.103     0.0429\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/49      3.85G     0.1082     0.1716    0.03719        586        640: 1\n",
      "tensor([2.39998], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.47      0.194      0.135     0.0591\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/49      3.85G     0.1058     0.1695    0.03462        785        640: 1\n",
      "tensor([2.28726], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.508      0.195      0.166     0.0764\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/49      3.85G     0.1036     0.1704    0.03301        417        640: 1\n",
      "tensor([2.08199], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.543      0.205      0.184     0.0867\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/49      3.85G     0.1021     0.1692    0.03196        276        640: 1\n",
      "tensor([1.62358], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.486      0.216      0.201     0.0971\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/49      3.85G     0.1015     0.1677    0.03114        436        640: 1\n",
      "tensor([2.00658], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.31      0.234      0.213      0.105\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/49      3.85G     0.1008     0.1677    0.03053        521        640: 1\n",
      "tensor([2.05718], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.321      0.228      0.221       0.11\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/49      3.85G    0.09984     0.1675    0.02999        326        640: 1\n",
      "tensor([1.70058], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.323      0.255      0.225       0.11\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/49      3.85G     0.0995     0.1666    0.02957        498        640: 1\n",
      "tensor([1.97196], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.343      0.268      0.237      0.118\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/49      3.85G    0.09957     0.1656    0.02915        502        640: 1\n",
      "tensor([1.86906], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.338      0.275      0.239      0.118\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/49      3.85G    0.09872     0.1663    0.02867        568        640: 1\n",
      "tensor([2.17400], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.335      0.282      0.249      0.124\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/49      3.85G    0.09855     0.1663    0.02826        572        640: 1\n",
      "tensor([2.18883], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.362      0.277      0.254      0.131\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/49      3.85G    0.09801     0.1639    0.02804        560        640: 1\n",
      "tensor([2.19058], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.356      0.289      0.259      0.132\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/49      3.85G    0.09787     0.1659    0.02779        466        640: 1\n",
      "tensor([1.95709], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.363      0.292      0.266      0.136\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/49      3.85G    0.09754      0.164    0.02765        709        640: 1\n",
      "tensor([2.32350], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.36      0.296      0.266      0.137\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/49      3.85G    0.09767      0.165    0.02736        440        640: 1\n",
      "tensor([1.83365], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.367      0.295      0.271      0.138\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/49      3.85G    0.09756     0.1648    0.02715        580        640: 1\n",
      "tensor([1.91298], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.374      0.297      0.273       0.14\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/49      3.85G    0.09686     0.1637    0.02703        503        640: 1\n",
      "tensor([1.95568], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.366      0.299      0.276      0.143\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/49      3.85G    0.09685     0.1643    0.02674        426        640: 1\n",
      "tensor([1.72600], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.375      0.302      0.279      0.143\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/49      3.85G    0.09659     0.1612     0.0265        705        640: 1\n",
      "tensor([1.94962], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.372      0.304      0.281      0.145\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/49      3.85G    0.09626      0.161    0.02646        907        640: 1\n",
      "tensor([2.53592], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.379      0.302      0.283      0.146\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/49      3.85G     0.0958     0.1613    0.02623        591        640: 1\n",
      "tensor([2.13241], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.389      0.307      0.286       0.15\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/49      3.85G    0.09595     0.1597    0.02612        567        640: 1\n",
      "tensor([1.93822], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.388      0.299      0.287       0.15\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/49      3.85G    0.09572     0.1598    0.02588        519        640: 1\n",
      "tensor([1.89805], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.397      0.307       0.29      0.154\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/49      3.85G    0.09534     0.1599    0.02577        751        640: 1\n",
      "tensor([2.14271], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.393      0.313      0.293      0.155\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/49      3.85G    0.09532     0.1596    0.02569        335        640: 1\n",
      "tensor([1.60446], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.389      0.316      0.296      0.156\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/49      3.85G    0.09502     0.1593    0.02566        754        640: 1\n",
      "tensor([2.47124], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.397       0.32      0.297      0.157\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/49      3.85G    0.09534     0.1587    0.02551        637        640: 1\n",
      "tensor([1.94841], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.394      0.315      0.296      0.157\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/49      3.85G    0.09561     0.1598    0.02538       1044        640: 1\n",
      "tensor([2.18377], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759        0.4      0.318      0.299      0.158\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/49      3.85G    0.09488      0.159    0.02527        288        640: 1\n",
      "tensor([1.47369], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.398      0.318      0.298      0.158\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/49      3.85G    0.09461     0.1584    0.02524        628        640: 1\n",
      "tensor([2.06197], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.404      0.317      0.303      0.161\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/49      3.85G     0.0951     0.1591    0.02498        580        640: 1\n",
      "tensor([2.06669], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.406      0.316      0.305      0.163\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/49      3.85G    0.09467      0.159    0.02495        746        640: 1\n",
      "tensor([2.14215], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.406      0.321      0.306      0.163\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/49      3.85G     0.0942      0.158    0.02491        455        640: 1\n",
      "tensor([1.75708], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.409      0.319      0.304      0.162\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/49      3.85G    0.09454     0.1576    0.02477        505        640: 1\n",
      "tensor([1.86413], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.419      0.319      0.309      0.166\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/49      3.85G    0.09434     0.1573     0.0247        434        640: 1\n",
      "tensor([1.83600], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.407      0.324      0.309      0.165\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/49      3.85G    0.09437     0.1578    0.02459        590        640: 1\n",
      "tensor([2.14103], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.423      0.319      0.311      0.165\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/49      3.85G    0.09367     0.1552    0.02453        686        640: 1\n",
      "tensor([2.09664], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.419      0.323      0.311      0.166\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/49      3.85G     0.0939     0.1554    0.02464        590        640: 1\n",
      "tensor([1.98639], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.425      0.319      0.313      0.166\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/49      3.85G    0.09361     0.1548    0.02423        650        640: 1\n",
      "tensor([2.05316], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.432      0.324      0.316      0.168\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/49      3.85G    0.09371     0.1538    0.02411        679        640: 1\n",
      "tensor([2.01826], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.418      0.327      0.312      0.167\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/49      3.85G    0.09358     0.1559    0.02413        508        640: 1\n",
      "tensor([1.81252], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.421      0.327      0.315      0.168\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/49      3.85G    0.09339     0.1557    0.02409        765        640: 1\n",
      "tensor([1.97624], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.429      0.323      0.315      0.169\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/49      3.85G    0.09346     0.1553    0.02396        518        640: 1\n",
      "tensor([1.97451], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.432      0.324      0.316       0.17\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/49      3.85G    0.09321      0.154    0.02384        589        640: 1\n",
      "tensor([1.93111], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.425      0.328      0.318      0.172\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/49      3.85G     0.0932     0.1543    0.02369        615        640: 1\n",
      "tensor([2.06856], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.434      0.329      0.319       0.17\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/49      3.85G    0.09332     0.1536    0.02377        603        640: 1\n",
      "tensor([2.02696], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.428      0.327      0.318      0.171\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/49      3.85G    0.09364     0.1533    0.02378        606        640: 1\n",
      "tensor([1.95758], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.434      0.328      0.319      0.172\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/49      3.85G      0.093     0.1541    0.02359        549        640: 1\n",
      "tensor([1.94359], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.429       0.33      0.318      0.171\n",
      "\n",
      "50 epochs completed in 0.657 hours.\n",
      "Optimizer stripped from runs/train/baseline_VisDrone/weights/last.pt, 14.5MB\n",
      "Optimizer stripped from runs/train/baseline_VisDrone/weights/best.pt, 14.5MB\n",
      "\n",
      "Validating runs/train/baseline_VisDrone/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.421       0.31      0.301      0.164\n",
      "                   car        548      14064      0.597      0.704      0.706      0.465\n",
      "                   van        548       1975      0.421      0.338      0.331      0.222\n",
      "                 truck        548        750      0.434      0.276      0.285      0.172\n",
      "                person        548       5125      0.432      0.285      0.268     0.0946\n",
      "               bicycle        548       1287      0.217      0.137     0.0989     0.0337\n",
      "                   bus        548        251      0.525      0.402      0.389      0.238\n",
      "             motorbike        548       4886      0.452       0.35      0.323      0.125\n",
      "            pedestrian        548       8844      0.446      0.369      0.371      0.154\n",
      "              tricycle        548       1045      0.426      0.144      0.157     0.0802\n",
      "       awning-tricycle        548        532      0.261      0.094     0.0865     0.0534\n",
      "Results saved to \u001b[1mruns/train/baseline_VisDrone\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : baseline_VisDrone\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/exp-100epoch/aacee3ec2ffd4892bf5d4a9f3d9662a6\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_f1              : 0.13821634540872327\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_false_positives : 142.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_mAP@.5          : 0.0865171596684956\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_mAP@.5:.95      : 0.05343748339859502\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_precision       : 0.26109192912203383\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_recall          : 0.09398496240601503\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_support         : 532\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_true_positives  : 50.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_f1                      : 0.16771643737663902\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_false_positives         : 636.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5                  : 0.09887737149296785\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5:.95              : 0.03371098529251408\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_precision               : 0.2168073572470018\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_recall                  : 0.13675213675213677\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_support                 : 1287\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_true_positives          : 176.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_f1                          : 0.45560827189485464\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_false_positives             : 91.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5                      : 0.38903146107379233\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5:.95                  : 0.2379868076019666\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_precision                   : 0.5250480987968394\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_recall                      : 0.40239043824701193\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_support                     : 251\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_true_positives              : 101.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_f1                          : 0.6464231360160794\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_false_positives             : 6677.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5                      : 0.7057215695470388\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5:.95                  : 0.46513395882954167\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_precision                   : 0.5973502539007539\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_recall                      : 0.7042804323094426\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_support                     : 14064\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_true_positives              : 9905.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [2005]                     : (3.3735175132751465, 6.2281904220581055)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [100]           : (0.0786610040916533, 0.31923430791866714)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [100]      : (0.031995447172979224, 0.1718048527236208)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [100]         : (0.1646849916216506, 0.5429359990396869)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [100]            : (0.1508963464133186, 0.33039238082201183)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_f1                    : 0.3946201733417753\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_false_positives       : 2071.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_mAP@.5                : 0.3232238927316888\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_mAP@.5:.95            : 0.1253857637309405\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_precision             : 0.4523137638715528\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_recall                : 0.3499795333606222\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_support               : 4886\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_true_positives        : 1710.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_f1                   : 0.4037737989569948\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_false_positives      : 4052.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_mAP@.5               : 0.3705319418108616\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_mAP@.5:.95           : 0.15396004866742102\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_precision            : 0.44602072294842326\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_recall               : 0.36883763003165987\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_support              : 8844\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_true_positives       : 3262.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_f1                       : 0.3431099891364195\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_false_positives          : 1921.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5                   : 0.268125808540627\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5:.95               : 0.09458744553590523\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_precision                : 0.43171281388892424\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_recall                   : 0.28468292682926827\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_support                  : 5125\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_true_positives           : 1459.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [100]            : (0.09299547225236893, 0.12726862728595734)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [100]            : (0.023586321622133255, 0.06140350177884102)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [100]            : (0.1377228945493698, 0.17155908048152924)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_f1                     : 0.21466600854352344\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_false_positives        : 203.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_mAP@.5                 : 0.15673292809586292\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_mAP@.5:.95             : 0.08023213399637733\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_precision              : 0.4255079989853361\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_recall                 : 0.14354066985645933\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_support                : 1045\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_true_positives         : 150.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_f1                        : 0.33752906286952944\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_false_positives           : 270.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5                    : 0.2846357340942738\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5:.95                : 0.17199717133419562\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_precision                 : 0.4343619820867324\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_recall                    : 0.276\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_support                   : 750\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_true_positives            : 207.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [100]              : (0.0915689766407013, 0.10670154541730881)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [100]              : (0.030134771019220352, 0.0458858497440815)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [100]              : (0.2270796000957489, 0.2364908903837204)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_f1                          : 0.3747695936408502\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_false_positives             : 918.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5                      : 0.3311529249876794\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5:.95                  : 0.22243869054618867\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_precision                   : 0.42094757225759677\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_recall                      : 0.33772151898734176\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_support                     : 1975\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_true_positives              : 667.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [100]                     : (0.0004960000000000005, 0.07007407407407407)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [100]                     : (0.0004960000000000005, 0.00959609547325103)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [100]                     : (0.0004960000000000005, 0.00959609547325103)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : baseline_VisDrone\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/exp-100epoch/aacee3ec2ffd4892bf5d4a9f3d9662a6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     DER_enable          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     DER_old_model       : []\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_enable          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_lambda          : []\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_temperature     : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Old_models          : []\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.225\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/baseline_VisDrone\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (2.15 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Please wait for assets to finish uploading (timeout is 10800 seconds)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m All assets have been sent, waiting for delivery confirmation\n"
     ]
    }
   ],
   "source": [
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_DER.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_openimages.yaml \\\n",
    "--data data/VisDrone_incremental.yaml \\\n",
    "--epochs 50 \\\n",
    "--weights yolov5s.pt \\\n",
    "--name baseline_VisDrone \\\n",
    "\"\"\"\n",
    "!{command}\n",
    "#43分钟"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "564937f5-fd64-48e2-b597-704164b88ae3",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_VisDrone_incremental.yaml, weights=['runs/train/baseline_VisDrone/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 b2fc1a21 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-test-dev/labe\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1610      75102      0.371       0.29      0.257      0.136\n",
      "                   car       1610      28074       0.56      0.682      0.656      0.384\n",
      "                   van       1610       5771      0.334      0.355      0.288      0.173\n",
      "                 truck       1610       2659      0.317      0.392      0.292      0.165\n",
      "                person       1610       6376      0.375      0.139      0.128     0.0391\n",
      "               bicycle       1610       1302      0.227     0.0776     0.0638     0.0217\n",
      "                   bus       1610       2940      0.595      0.502      0.518      0.326\n",
      "             motorbike       1610       5845      0.361       0.26      0.198     0.0685\n",
      "            pedestrian       1610      21006      0.389      0.242      0.229     0.0862\n",
      "              tricycle       1610        530      0.224       0.14     0.0856     0.0395\n",
      "       awning-tricycle       1610        599      0.323      0.113      0.109     0.0581\n",
      "Speed: 0.1ms pre-process, 5.2ms inference, 6.0ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp322\u001b[0m\n",
      "Vis\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/openimages.yaml, weights=['runs/train/baseline_VisDrone/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 b2fc1a21 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/test.cache... 600 ima\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        600       1621      0.836     0.0334     0.0338     0.0244\n",
      "                   car        600        113      0.129     0.0442     0.0315     0.0175\n",
      "                   van        600          6          1          0   0.000216   2.16e-05\n",
      "                 truck        600         17          0          0    0.00728    0.00632\n",
      "                person        600       1131          1          0     0.0205    0.00784\n",
      "               bicycle        600         43      0.459     0.0233     0.0189     0.0051\n",
      "                  bird        600         61          1          0          0          0\n",
      "                  boat        600         82          1          0          0          0\n",
      "                bottle        600          1          1          0          0          0\n",
      "                   bus        600          3      0.798      0.667      0.665      0.499\n",
      "                   cat        600          5          1          0          0          0\n",
      "                 chair        600         12          1          0          0          0\n",
      "                   dog        600         25          1          0          0          0\n",
      "                 horse        600         37          1          0          0          0\n",
      "                 sheep        600          8          1          0          0          0\n",
      "                 train        600          2          1          0          0          0\n",
      "             billboard        600          3          1          0          0          0\n",
      "                rabbit        600          1          1          0          0          0\n",
      "                monkey        600         16          1          0          0          0\n",
      "                   pig        600          7          1          0          0          0\n",
      "                   toy        600         42          1          0          0          0\n",
      "         traffic light        600          5          0          0          0          0\n",
      "          traffic sign        600          1          1          0          0          0\n",
      "Speed: 0.1ms pre-process, 2.7ms inference, 1.1ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp323\u001b[0m\n",
      "openimages\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_VOC.yaml, weights=['runs/train/baseline_VisDrone/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 b2fc1a21 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 image\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.726     0.0198     0.0113    0.00541\n",
      "                   car       4952       1201      0.147      0.237      0.111     0.0583\n",
      "                person       4952       4528      0.244     0.0413     0.0403     0.0149\n",
      "             aeroplane       4952        285          0          0          0          0\n",
      "               bicycle       4952        337     0.0606     0.0326    0.00874    0.00233\n",
      "                  bird       4952        459          1          0          0          0\n",
      "                  boat       4952        263          1          0          0          0\n",
      "                bottle       4952        469          1          0    0.00447    0.00178\n",
      "                   bus       4952        213     0.0614     0.0751     0.0324     0.0183\n",
      "                   cat       4952        358          1          0          0          0\n",
      "                 chair       4952        756          1          0     0.0116    0.00491\n",
      "                   cow       4952        244          1          0          0          0\n",
      "           diningtable       4952        206          1          0          0          0\n",
      "                   dog       4952        489          1          0          0          0\n",
      "                 horse       4952        348          1          0          0          0\n",
      "             motorbike       4952        325     0.0156    0.00923     0.0021     0.0007\n",
      "           pottedplant       4952        480          1          0    0.00164   0.000164\n",
      "                 sheep       4952        242          1          0          0          0\n",
      "                  sofa       4952        239          1          0          0          0\n",
      "                 train       4952        282          1          0    0.00442     0.0033\n",
      "             tvmonitor       4952        308          1          0    0.00846    0.00338\n",
      "Speed: 0.1ms pre-process, 1.5ms inference, 0.7ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp324\u001b[0m\n",
      "Voc\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_kitti.yaml, weights=['runs/train/baseline_VisDrone/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 b2fc1a21 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.627      0.118      0.101      0.049\n",
      "                   car       2244       8711      0.579      0.611      0.608      0.298\n",
      "                   van       2244        861      0.177     0.0604     0.0709     0.0331\n",
      "                 truck       2244        333     0.0991      0.267     0.0934     0.0473\n",
      "                  tram       2244        138          1          0          0          0\n",
      "                person       2244       1286      0.164    0.00544     0.0291    0.00835\n",
      "        person_sitting       2244         89          1          0          0          0\n",
      "               cyclist       2244        496          1          0          0          0\n",
      "                  misc       2244        284          1          0    0.00925    0.00533\n",
      "Speed: 0.0ms pre-process, 0.9ms inference, 1.0ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp325\u001b[0m\n",
      "kitti\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the trained checkpoint on each dataset's test split, in order.\n",
    "# Previously this cell copy-pasted the same val.py invocation four times;\n",
    "# the (label, data yaml) pairs are now data and a single loop runs them.\n",
    "model = 'runs/train/baseline_VisDrone/weights/last.pt'\n",
    "\n",
    "# echo '{label}' runs only when val.py exits successfully (&& chaining),\n",
    "# marking which dataset the preceding results belong to.\n",
    "datasets = [\n",
    "    ('Vis', 'data/val_VisDrone_incremental.yaml'),\n",
    "    ('openimages', 'data/openimages.yaml'),\n",
    "    ('Voc', 'data/val_VOC.yaml'),\n",
    "    ('kitti', 'data/val_kitti.yaml'),\n",
    "]\n",
    "\n",
    "for label, data_yaml in datasets:\n",
    "    val_command = (\n",
    "        f\"python val.py \"\n",
    "        f\"--data {data_yaml} \"\n",
    "        f\"--weights {model} \"\n",
    "        f\"--task test && \"\n",
    "        f\"echo '{label}'\"\n",
    "    )\n",
    "    !{val_command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e45d4642-8846-415f-80ee-2c9de99f9295",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8cc2e64c-2b26-42f4-aa4b-aca75c091f29",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6ba72121-1607-4373-8d9a-b3329857a5f9",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "d54f3eed-a9c0-49b7-93a0-a6a3fe4ea77d",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_DER: \u001b[0mweights=./runs/train/k_v_o_replay_DER/weights/last.pt, cfg=models/yolov5s_openimages.yaml, data=data/VisDrone_incremental.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=50, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=vis_k_v_o_base_50, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=False, Lwf_lambda=[], Lwf_temperature=1.0, Old_models=[], DER_enable=False, DER_old_model=[]\n",
      "Command 'git fetch ultralytics' timed out after 5 seconds\n",
      "YOLOv5 🚀 b2fc1a21 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/exp-100epoch/d60f74827d2b4bbaa0b99a8c976ad2fc\u001b[0m\n",
      "\n",
      "extractors长度： 0\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1    110583  models.yolo_DerTest.Detect              [36, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7116727 parameters, 7116727 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 351/355 items from runs/train/k_v_o_replay_DER/weights/last.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 66 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/labels\u001b[0m\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/0000137_02220_d_0000163.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/0000140_00118_d_0000002.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/9999945_00000_d_0000114.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/9999987_00000_d_0000049.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-val/labels.cac\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m2.95 anchors/target, 0.933 Best Possible Recall (BPR). Anchors are a poor fit to dataset ⚠️, attempting to improve...\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mWARNING ⚠️ Extremely small objects found: 29644 of 343201 labels are <3 pixels in size\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mRunning kmeans for 9 anchors on 342304 points...\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mEvolving anchors with Genetic Algorithm: fitness = 0.7493: 100%|████\u001b[0m\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mthr=0.25: 0.9995 best possible recall, 5.74 anchors past thr\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mn=9, img_size=640, metric_all=0.364/0.748-mean/best, past_thr=0.485-mean: 3,5, 4,9, 8,7, 8,15, 16,9, 16,21, 33,17, 29,37, 61,63\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mDone ✅ (optional: update model *.yaml to use these anchors in the future)\n",
      "Plotting labels to runs/train/vis_k_v_o_base_50/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/vis_k_v_o_base_50\u001b[0m\n",
      "Starting training for 50 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/49      3.65G     0.1461      0.121    0.07936       1333        640:  fatal: unable to access 'https://github.com/ultralytics/yolov5/': GnuTLS recv error (-110): The TLS connection was non-properly terminated.\n",
      "       0/49      3.65G     0.1338     0.1345     0.0636        431        640: 1\n",
      "tensor([2.15022], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.035      0.114      0.046      0.018\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/49      3.85G     0.1134     0.1668     0.0461        589        640: 1\n",
      "tensor([2.41847], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.48      0.161     0.0913     0.0392\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/49      3.85G     0.1093     0.1724    0.03967        586        640: 1\n",
      "tensor([2.46499], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.406      0.187      0.111      0.049\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/49      3.85G      0.106     0.1705    0.03741        785        640: 1\n",
      "tensor([2.30443], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.327      0.208      0.129     0.0606\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/49      3.85G     0.1036      0.171    0.03556        417        640: 1\n",
      "tensor([2.11018], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.49      0.191      0.147     0.0675\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/49      3.85G     0.1021     0.1695    0.03411        276        640: 1\n",
      "tensor([1.63566], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.503      0.198      0.166     0.0764\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/49      3.85G     0.1016     0.1682    0.03297        436        640: 1\n",
      "tensor([2.03199], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.407      0.216      0.177      0.085\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/49      3.85G      0.101     0.1683     0.0322        521        640: 1\n",
      "tensor([2.08751], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.429      0.207      0.182     0.0894\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/49      3.85G     0.1001     0.1683    0.03157        326        640: 1\n",
      "tensor([1.72205], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.449       0.22      0.198     0.0974\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/49      3.85G    0.09981     0.1672    0.03106        498        640: 1\n",
      "tensor([1.98391], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.551      0.226      0.207      0.103\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/49      3.85G    0.09992     0.1661    0.03057        502        640: 1\n",
      "tensor([1.89746], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.423      0.232      0.215      0.105\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/49      3.85G    0.09903     0.1671    0.03003        568        640: 1\n",
      "tensor([2.16440], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.31      0.243      0.223      0.112\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/49      3.85G    0.09889      0.167    0.02954        572        640: 1\n",
      "tensor([2.19860], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.343      0.258      0.228      0.116\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/49      3.85G    0.09837     0.1646    0.02935        560        640: 1\n",
      "tensor([2.20977], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.326      0.265      0.233      0.119\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/49      3.85G    0.09823     0.1667     0.0291        466        640: 1\n",
      "tensor([1.99659], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.338      0.268      0.237       0.12\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/49      3.85G    0.09796      0.165    0.02891        709        640: 1\n",
      "tensor([2.33343], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.334      0.276      0.241      0.123\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/49      3.85G    0.09804     0.1658    0.02864        440        640: 1\n",
      "tensor([1.84463], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.335      0.279       0.25      0.127\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/49      3.85G    0.09793     0.1658    0.02837        580        640: 1\n",
      "tensor([1.93949], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.344      0.285      0.249      0.128\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/49      3.85G    0.09724     0.1646    0.02825        503        640: 1\n",
      "tensor([1.95885], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.336      0.296      0.253       0.13\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/49      3.85G    0.09725     0.1654    0.02793        426        640: 1\n",
      "tensor([1.73635], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.35      0.287      0.257      0.132\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/49      3.85G      0.097     0.1622     0.0277        705        640: 1\n",
      "tensor([1.96072], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.346      0.295      0.258      0.133\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/49      3.85G     0.0967      0.162    0.02764        907        640: 1\n",
      "tensor([2.54116], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.364      0.293      0.261      0.136\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/49      3.85G    0.09627     0.1624    0.02741        591        640: 1\n",
      "tensor([2.17701], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.354      0.298      0.265       0.14\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/49      3.85G     0.0964     0.1608    0.02726        567        640: 1\n",
      "tensor([1.93609], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.369      0.296      0.268       0.14\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/49      3.85G    0.09618     0.1608      0.027        519        640: 1\n",
      "tensor([1.89358], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.369      0.298      0.271      0.142\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/49      3.85G    0.09581     0.1611    0.02691        751        640: 1\n",
      "tensor([2.16681], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.361      0.302      0.271      0.141\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/49      3.85G    0.09578     0.1609    0.02684        335        640: 1\n",
      "tensor([1.62697], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.371      0.299      0.275      0.143\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/49      3.85G    0.09542     0.1604    0.02672        754        640: 1\n",
      "tensor([2.49479], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.373      0.303      0.277      0.145\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/49      3.85G    0.09582       0.16    0.02661        637        640: 1\n",
      "tensor([1.98348], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.369      0.306      0.281      0.146\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/49      3.85G    0.09602     0.1611    0.02647       1044        640: 1\n",
      "tensor([2.19550], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.382      0.303       0.28      0.148\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/49      3.85G    0.09535     0.1604    0.02636        288        640: 1\n",
      "tensor([1.50834], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.39      0.302      0.284       0.15\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/49      3.85G    0.09507     0.1597    0.02626        628        640: 1\n",
      "tensor([2.05828], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.392      0.305      0.285       0.15\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/49      3.85G    0.09554     0.1605    0.02602        580        640: 1\n",
      "tensor([2.09625], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.394      0.308      0.291      0.154\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/49      3.85G    0.09515     0.1604    0.02597        746        640: 1\n",
      "tensor([2.17806], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.381      0.308      0.288      0.154\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/49      3.85G    0.09467     0.1594    0.02599        455        640: 1\n",
      "tensor([1.80581], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.396      0.304       0.29      0.155\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/49      3.85G    0.09502     0.1591    0.02582        505        640: 1\n",
      "tensor([1.88543], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.395      0.307      0.288      0.153\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/49      3.85G    0.09483      0.159    0.02573        434        640: 1\n",
      "tensor([1.83393], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.391      0.307       0.29      0.155\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/49      3.85G    0.09489     0.1593    0.02562        590        640: 1\n",
      "tensor([2.17650], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.408      0.308      0.294      0.156\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/49      3.85G    0.09416     0.1568    0.02555        686        640: 1\n",
      "tensor([2.14791], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759        0.4      0.308      0.295      0.158\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/49      3.85G    0.09443      0.157    0.02565        590        640: 1\n",
      "tensor([2.01197], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.395      0.313      0.296      0.157\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/49      3.85G    0.09415     0.1566    0.02528        650        640: 1\n",
      "tensor([2.06919], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.403      0.308      0.297      0.159\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/49      3.85G    0.09424     0.1555    0.02513        679        640: 1\n",
      "tensor([2.03022], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.404      0.309      0.297      0.159\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/49      3.85G    0.09413     0.1575    0.02517        508        640: 1\n",
      "tensor([1.84921], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.402      0.314      0.298      0.159\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/49      3.85G    0.09392     0.1574    0.02511        765        640: 1\n",
      "tensor([1.97630], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.404      0.317      0.299       0.16\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/49      3.85G    0.09402      0.157    0.02505        518        640: 1\n",
      "tensor([1.97804], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.403      0.311      0.297       0.16\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/49      3.85G    0.09376     0.1558    0.02492        589        640: 1\n",
      "tensor([1.97033], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.408      0.311      0.298       0.16\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/49      3.85G    0.09382     0.1561     0.0248        615        640: 1\n",
      "tensor([2.08748], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.409      0.315      0.302      0.162\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/49      3.85G    0.09393     0.1555    0.02482        603        640: 1\n",
      "tensor([2.06269], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.41      0.313        0.3      0.162\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/49      3.85G    0.09422     0.1551    0.02489        606        640: 1\n",
      "tensor([1.96942], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.409      0.314      0.301      0.162\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/49      3.85G    0.09359     0.1561    0.02467        549        640: 1\n",
      "tensor([1.94785], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.416      0.314      0.304      0.163\n",
      "\n",
      "50 epochs completed in 0.655 hours.\n",
      "Optimizer stripped from runs/train/vis_k_v_o_base_50/weights/last.pt, 14.5MB\n",
      "Optimizer stripped from runs/train/vis_k_v_o_base_50/weights/best.pt, 14.5MB\n",
      "\n",
      "Validating runs/train/vis_k_v_o_base_50/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.414      0.302      0.293      0.159\n",
      "                   car        548      14064      0.592      0.715       0.71      0.468\n",
      "                   van        548       1975       0.42      0.311      0.306      0.207\n",
      "                 truck        548        750      0.427      0.276      0.276      0.165\n",
      "                person        548       5125      0.418      0.273      0.257      0.087\n",
      "               bicycle        548       1287      0.201      0.128      0.083     0.0301\n",
      "                   bus        548        251      0.479       0.39      0.382      0.233\n",
      "             motorbike        548       4886      0.452       0.34      0.314       0.12\n",
      "            pedestrian        548       8844      0.432      0.382      0.369      0.153\n",
      "              tricycle        548       1045      0.429      0.122      0.142     0.0715\n",
      "       awning-tricycle        548        532       0.29     0.0865     0.0932     0.0584\n",
      "Results saved to \u001b[1mruns/train/vis_k_v_o_base_50\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : vis_k_v_o_base_50\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/exp-100epoch/d60f74827d2b4bbaa0b99a8c976ad2fc\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_f1              : 0.13325719879053627\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_false_positives : 112.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_mAP@.5          : 0.09323306954919976\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_mAP@.5:.95      : 0.05840634083321985\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_precision       : 0.2904146351058752\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_recall          : 0.08646616541353383\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_support         : 532\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_true_positives  : 46.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_f1                      : 0.15599126732690816\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_false_positives         : 654.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5                  : 0.08295009011858917\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5:.95              : 0.030076656525608113\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_precision               : 0.2005180490952849\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_recall                  : 0.12764629431296098\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_support                 : 1287\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_true_positives          : 164.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_f1                          : 0.43028922437641237\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_false_positives             : 107.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5                      : 0.3824112679321677\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5:.95                  : 0.23336748830613813\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_precision                   : 0.4791998598311367\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_recall                      : 0.3904382470119522\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_support                     : 251\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_true_positives              : 98.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_f1                          : 0.647881047309764\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_false_positives             : 6919.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5                      : 0.7096800671955585\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5:.95                  : 0.4681409836908796\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_precision                   : 0.5923678622184445\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_recall                      : 0.7148748577929466\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_support                     : 14064\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_true_positives              : 10054.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [2005]                     : (3.3858470916748047, 6.2970428466796875)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [100]           : (0.04595493212473652, 0.30350612500782825)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [100]      : (0.017999785705553967, 0.16322273093450013)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [100]         : (0.03501674362730931, 0.5509073082765478)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [100]            : (0.11407392497019404, 0.3165241668495989)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_f1                    : 0.3882537937854636\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_false_positives       : 2018.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_mAP@.5                : 0.31367523123431923\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_mAP@.5:.95            : 0.12000561459370367\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_precision             : 0.4518332331658401\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_recall                : 0.3403602128530495\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_support               : 4886\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_true_positives        : 1663.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_f1                   : 0.4053609488106964\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_false_positives      : 4449.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_mAP@.5               : 0.36946487931506256\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_mAP@.5:.95           : 0.15302266265873893\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_precision            : 0.43167977498528776\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_recall               : 0.3820669380370873\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_support              : 8844\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_true_positives       : 3379.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_f1                       : 0.3299760681465354\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_false_positives          : 1945.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5                   : 0.2572873550490806\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5:.95               : 0.08701593672388927\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_precision                : 0.4179780565667687\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_recall                   : 0.27258536585365856\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_support                  : 5125\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_true_positives           : 1397.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [100]            : (0.09359484165906906, 0.13379372656345367)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [100]            : (0.02467232756316662, 0.06359588354825974)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [100]            : (0.13454082608222961, 0.1724129617214203)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_f1                     : 0.1902302136766654\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_false_positives        : 170.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_mAP@.5                 : 0.14159545312897448\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_mAP@.5:.95             : 0.07152555970443465\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_precision              : 0.4289776183885058\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_recall                 : 0.12221273905484432\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_support                : 1045\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_true_positives         : 128.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_f1                        : 0.33514658835627986\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_false_positives           : 278.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5                    : 0.2762972916647862\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5:.95                : 0.1651820408743459\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_precision                 : 0.4265575426514716\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_recall                    : 0.276\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_support                   : 750\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_true_positives            : 207.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [100]              : (0.09180101007223129, 0.11348856985569)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [100]              : (0.03071555867791176, 0.055173493921756744)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [100]              : (0.2250179648399353, 0.2405259907245636)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_f1                          : 0.356944490751365\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_false_positives             : 848.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5                      : 0.30596667626361496\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5:.95                  : 0.2068784904904951\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_precision                   : 0.41968710508894835\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_recall                      : 0.3105219143193827\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_support                     : 1975\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_true_positives              : 613.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [100]                     : (0.0004960000000000005, 0.07007407407407407)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [100]                     : (0.0004960000000000005, 0.00959609547325103)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [100]                     : (0.0004960000000000005, 0.00959609547325103)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : vis_k_v_o_base_50\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/exp-100epoch/d60f74827d2b4bbaa0b99a8c976ad2fc\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     DER_enable          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     DER_old_model       : []\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_enable          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_lambda          : []\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_temperature     : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Old_models          : []\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.225\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/vis_k_v_o_base_50\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (2.10 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Please wait for assets to finish uploading (timeout is 10800 seconds)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m All assets have been sent, waiting for delivery confirmation\n"
     ]
    }
   ],
   "source": [
    "# Train YOLOv5 (DER variant) for 50 epochs on VisDrone incremental data,\n",
    "# logging per-class metrics to Comet. Results land in runs/train/vis_k_v_o_base_50.\n",
    "# NOTE(review): the captured output above shows weights=yolov5s.pt and\n",
    "# name=baseline_VisDrone on the argparse line, but the command below passes\n",
    "# different --weights/--name values — the cell appears to have been edited\n",
    "# after execution; re-run to refresh the output.\n",
    "command = \"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_DER.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_openimages.yaml \\\n",
    "--data data/VisDrone_incremental.yaml \\\n",
    "--epochs 50 \\\n",
    "--weights ./runs/train/k_v_o_replay_DER/weights/last.pt \\\n",
    "--name vis_k_v_o_base_50\n",
    "\"\"\"\n",
    "!{command}\n",
    "# took ~43 minutes"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "732f520b-0a6b-4881-96bb-2f46e2d20215",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_VisDrone_incremental.yaml, weights=['runs/train/vis_k_v_o_base_50/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 b2fc1a21 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-test-dev/labe\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1610      75102      0.347      0.288      0.246      0.129\n",
      "                   car       1610      28074      0.528      0.686       0.65      0.381\n",
      "                   van       1610       5771      0.282      0.377      0.265      0.159\n",
      "                 truck       1610       2659        0.3      0.374      0.269      0.146\n",
      "                person       1610       6376      0.342      0.139      0.117      0.035\n",
      "               bicycle       1610       1302      0.199     0.0876     0.0618     0.0223\n",
      "                   bus       1610       2940       0.57      0.497      0.498      0.308\n",
      "             motorbike       1610       5845      0.354      0.264        0.2     0.0684\n",
      "            pedestrian       1610      21006      0.367       0.24      0.223      0.083\n",
      "              tricycle       1610        530      0.208      0.123     0.0813     0.0395\n",
      "       awning-tricycle       1610        599      0.325     0.0935     0.0905     0.0465\n",
      "Speed: 0.1ms pre-process, 5.2ms inference, 6.1ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp326\u001b[0m\n",
      "Vis\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/openimages.yaml, weights=['runs/train/vis_k_v_o_base_50/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 b2fc1a21 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/test.cache... 600 ima\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        600       1621      0.552     0.0319     0.0329     0.0225\n",
      "                   car        600        113      0.206     0.0354     0.0306     0.0176\n",
      "                   van        600          6          1          0          0          0\n",
      "                 truck        600         17          0          0   0.000978   0.000577\n",
      "                person        600       1131          1          0     0.0168    0.00596\n",
      "               bicycle        600         43          1          0    0.00992    0.00461\n",
      "                  bird        600         61          1          0          0          0\n",
      "                  boat        600         82          1          0          0          0\n",
      "                bottle        600          1          1          0          0          0\n",
      "                   bus        600          3      0.941      0.667      0.665      0.466\n",
      "                   cat        600          5          0          0          0          0\n",
      "                 chair        600         12          1          0          0          0\n",
      "                   dog        600         25          0          0          0          0\n",
      "                 horse        600         37          1          0          0          0\n",
      "                 sheep        600          8          1          0          0          0\n",
      "                 train        600          2          1          0          0          0\n",
      "             billboard        600          3          1          0          0          0\n",
      "                rabbit        600          1          0          0          0          0\n",
      "                monkey        600         16          0          0          0          0\n",
      "                   pig        600          7          0          0          0          0\n",
      "                   toy        600         42          0          0          0          0\n",
      "         traffic light        600          5          0          0          0          0\n",
      "          traffic sign        600          1          0          0          0          0\n",
      "Speed: 0.1ms pre-process, 2.6ms inference, 1.4ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp327\u001b[0m\n",
      "openimages\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_VOC.yaml, weights=['runs/train/vis_k_v_o_base_50/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 b2fc1a21 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 image\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.671     0.0202     0.0196     0.0128\n",
      "                   car       4952       1201      0.121       0.24     0.0891     0.0466\n",
      "                person       4952       4528      0.181     0.0371     0.0297     0.0107\n",
      "             aeroplane       4952        285          0          0          0          0\n",
      "               bicycle       4952        337     0.0387     0.0148    0.00878    0.00477\n",
      "                  bird       4952        459          1          0          0          0\n",
      "                  boat       4952        263          1          0          0          0\n",
      "                bottle       4952        469          1          0    0.00696   0.000696\n",
      "                   bus       4952        213      0.038     0.0845     0.0221     0.0102\n",
      "                   cat       4952        358          1          0          0          0\n",
      "                 chair       4952        756          1          0      0.022     0.0176\n",
      "                   cow       4952        244          1          0     0.0386      0.027\n",
      "           diningtable       4952        206          1          0          0          0\n",
      "                   dog       4952        489          1          0          0          0\n",
      "                 horse       4952        348          1          0          0          0\n",
      "             motorbike       4952        325     0.0464     0.0277    0.00405    0.00128\n",
      "           pottedplant       4952        480          1          0          0          0\n",
      "                 sheep       4952        242          1          0          0          0\n",
      "                  sofa       4952        239          0          0          0          0\n",
      "                 train       4952        282          1          0          0          0\n",
      "             tvmonitor       4952        308          1          0      0.171      0.136\n",
      "Speed: 0.1ms pre-process, 1.4ms inference, 0.8ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp328\u001b[0m\n",
      "Voc\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_kitti.yaml, weights=['runs/train/vis_k_v_o_base_50/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 b2fc1a21 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.517       0.13      0.165     0.0812\n",
      "                   car       2244       8711      0.592       0.64      0.636      0.317\n",
      "                   van       2244        861      0.129     0.0732     0.0628      0.028\n",
      "                 truck       2244        333      0.126      0.303     0.0871     0.0427\n",
      "                  tram       2244        138          1          0          0          0\n",
      "                person       2244       1286      0.292     0.0218     0.0358     0.0112\n",
      "        person_sitting       2244         89          1          0          0          0\n",
      "               cyclist       2244        496          0          0          0          0\n",
      "                  misc       2244        284          1          0      0.502      0.251\n",
      "Speed: 0.0ms pre-process, 1.0ms inference, 1.0ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp329\u001b[0m\n",
      "kitti\n"
     ]
    }
   ],
   "source": [
    "# 1e-4 1e-3\n",
    "# Evaluate one trained checkpoint on every benchmark in turn.\n",
    "model = 'runs/train/vis_k_v_o_base_50/weights/last.pt'\n",
    "\n",
    "# (data yaml, echo label) pairs; the label is printed after each run so the\n",
    "# log sections above can be told apart. Order matters: Vis, openimages, Voc, kitti.\n",
    "datasets = [\n",
    "    ('data/val_VisDrone_incremental.yaml', 'Vis'),\n",
    "    ('data/openimages.yaml', 'openimages'),\n",
    "    ('data/val_VOC.yaml', 'Voc'),\n",
    "    ('data/val_kitti.yaml', 'kitti'),\n",
    "]\n",
    "\n",
    "for data_yaml, label in datasets:\n",
    "    # Same command the four copy-pasted blocks used to build by hand.\n",
    "    val_command = f\"python val.py --data {data_yaml} --weights {model} --task test && echo '{label}'\"\n",
    "    !{val_command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5ff2db38-5a08-413d-836f-30ccd1738ad5",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "970724f2-21fc-4a09-867d-37aeacec5df9",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ee088e1c-a6d2-406c-9653-6074cdb026fb",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c83c5022-2ad0-4d3e-b16a-8cf6d041f3d9",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "61aca9aa-e44b-4cbb-8e72-e5c6a6bfdda1",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "d6c78fc2-648d-4264-b393-9467cf8a3033",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_DER: \u001b[0mweights=./runs/train/k_v_o_replay_DER/weights/last.pt, cfg=models/yolov5s_openimages.yaml, data=data/VisDrone_incremental.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=100, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=vis_k_v_o_base, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=False, Lwf_lambda=[], Lwf_temperature=1.0, Old_models=[], DER_enable=False, DER_old_model=[]\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2895 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 7330b3e4 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/exp-100epoch/fe9df603b7b74b519cb6e73e2ffeada8\u001b[0m\n",
      "\n",
      "extractors长度： 0\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1    110583  models.yolo_DerTest.Detect              [36, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7116727 parameters, 7116727 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 351/355 items from runs/train/k_v_o_replay_DER/weights/last.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 66 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/labels\u001b[0m\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/0000137_02220_d_0000163.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/0000140_00118_d_0000002.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/9999945_00000_d_0000114.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/9999987_00000_d_0000049.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-val/labels.cac\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m2.95 anchors/target, 0.933 Best Possible Recall (BPR). Anchors are a poor fit to dataset ⚠️, attempting to improve...\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mWARNING ⚠️ Extremely small objects found: 29644 of 343201 labels are <3 pixels in size\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mRunning kmeans for 9 anchors on 342304 points...\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mEvolving anchors with Genetic Algorithm: fitness = 0.7493: 100%|████\u001b[0m\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mthr=0.25: 0.9995 best possible recall, 5.74 anchors past thr\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mn=9, img_size=640, metric_all=0.364/0.748-mean/best, past_thr=0.485-mean: 3,5, 4,9, 8,7, 8,15, 16,9, 16,21, 33,17, 29,37, 61,63\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mDone ✅ (optional: update model *.yaml to use these anchors in the future)\n",
      "Plotting labels to runs/train/vis_k_v_o_base/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/vis_k_v_o_base\u001b[0m\n",
      "Starting training for 100 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/99      3.65G     0.1338     0.1345     0.0636        431        640: 1\n",
      "tensor([2.15022], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.035      0.114      0.046      0.018\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/99      3.85G     0.1134     0.1669    0.04607        589        640: 1\n",
      "tensor([2.41401], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.479      0.162     0.0914     0.0386\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/99      3.85G     0.1092     0.1731    0.03967        586        640: 1\n",
      "tensor([2.45411], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.403      0.185       0.11     0.0494\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/99      3.85G     0.1061     0.1704     0.0374        785        640: 1\n",
      "tensor([2.29689], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.43      0.207      0.129     0.0609\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/99      3.85G     0.1037      0.171    0.03551        417        640: 1\n",
      "tensor([2.10539], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.497      0.189      0.148     0.0689\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/99      3.85G     0.1022     0.1695    0.03407        276        640: 1\n",
      "tensor([1.63650], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.503      0.198      0.164     0.0761\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/99      3.85G     0.1016     0.1684    0.03294        436        640: 1\n",
      "tensor([2.04686], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.409      0.214      0.177     0.0857\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/99      3.85G     0.1011     0.1685    0.03218        521        640: 1\n",
      "tensor([2.08591], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.425      0.206      0.184     0.0902\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/99      3.85G     0.1002     0.1684    0.03154        326        640: 1\n",
      "tensor([1.72077], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.352       0.22      0.199     0.0968\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/99      3.85G    0.09989     0.1674    0.03104        498        640: 1\n",
      "tensor([1.99506], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.551      0.229      0.208      0.103\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/99      3.85G        0.1     0.1663    0.03055        502        640: 1\n",
      "tensor([1.89540], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.391      0.232      0.215      0.104\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/99      3.85G    0.09917     0.1673    0.03002        568        640: 1\n",
      "tensor([2.17206], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.407       0.24      0.224      0.113\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/99      3.85G    0.09903     0.1673    0.02952        572        640: 1\n",
      "tensor([2.20499], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.345      0.256      0.229      0.116\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/99      3.85G    0.09854     0.1649    0.02933        560        640: 1\n",
      "tensor([2.22045], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.343      0.266      0.234      0.118\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/99      3.85G    0.09842     0.1671    0.02909        466        640: 1\n",
      "tensor([1.99618], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.358       0.27      0.239      0.121\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/99      3.85G    0.09815     0.1655     0.0289        709        640: 1\n",
      "tensor([2.32566], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.333       0.28      0.241      0.124\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/99      3.85G    0.09825     0.1662     0.0286        440        640: 1\n",
      "tensor([1.84718], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.33      0.281       0.25      0.126\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/99      3.85G    0.09821     0.1663    0.02838        580        640: 1\n",
      "tensor([1.94775], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.338      0.284      0.249      0.127\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/99      3.85G    0.09753     0.1652    0.02828        503        640: 1\n",
      "tensor([1.96718], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.34      0.292      0.253       0.13\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/99      3.85G     0.0975     0.1659    0.02795        426        640: 1\n",
      "tensor([1.74765], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.345       0.29      0.261      0.135\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/99      3.85G    0.09733     0.1629    0.02772        705        640: 1\n",
      "tensor([1.95570], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.354      0.293       0.26      0.134\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/99      3.85G      0.097     0.1626    0.02765        907        640: 1\n",
      "tensor([2.55484], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.366      0.294      0.264      0.137\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/99      3.85G    0.09657      0.163    0.02742        591        640: 1\n",
      "tensor([2.18156], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.362      0.298      0.268      0.141\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/99      3.85G    0.09679     0.1616    0.02728        567        640: 1\n",
      "tensor([1.94189], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.364      0.298      0.269       0.14\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/99      3.85G    0.09654     0.1616    0.02704        519        640: 1\n",
      "tensor([1.90336], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.365        0.3      0.271      0.141\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/99      3.85G    0.09618      0.162    0.02693        751        640: 1\n",
      "tensor([2.18074], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.358      0.303       0.27      0.141\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/99      3.85G    0.09623     0.1619    0.02688        335        640: 1\n",
      "tensor([1.64430], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.374      0.296      0.273      0.142\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/99      3.85G    0.09592     0.1615     0.0268        754        640: 1\n",
      "tensor([2.49421], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.369      0.301       0.28      0.146\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/99      3.85G    0.09627      0.161    0.02665        637        640: 1\n",
      "tensor([1.98934], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.389      0.299      0.284      0.149\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/99      3.85G    0.09647     0.1621    0.02654       1044        640: 1\n",
      "tensor([2.20339], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.383      0.302       0.28      0.148\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/99      3.85G    0.09583     0.1616    0.02643        288        640: 1\n",
      "tensor([1.51436], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.384        0.3      0.284      0.149\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/99      3.85G    0.09561      0.161    0.02636        628        640: 1\n",
      "tensor([2.07178], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.395      0.299      0.285      0.149\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/99      3.85G    0.09612     0.1619    0.02612        580        640: 1\n",
      "tensor([2.14435], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.392      0.305       0.29      0.153\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/99      3.85G     0.0957     0.1619    0.02609        746        640: 1\n",
      "tensor([2.20239], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.384       0.31      0.292      0.154\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/99      3.85G    0.09523     0.1609    0.02609        455        640: 1\n",
      "tensor([1.81909], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.396       0.31      0.291      0.155\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/99      3.85G    0.09562     0.1607    0.02593        505        640: 1\n",
      "tensor([1.91136], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.392       0.31      0.291      0.155\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/99      3.85G    0.09546     0.1608    0.02584        434        640: 1\n",
      "tensor([1.84048], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.39      0.307      0.289      0.154\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/99      3.85G    0.09549     0.1611    0.02574        590        640: 1\n",
      "tensor([2.18765], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.411      0.307      0.295      0.157\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/99      3.85G    0.09484     0.1585    0.02568        686        640: 1\n",
      "tensor([2.13538], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.405      0.312      0.298       0.16\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/99      3.85G    0.09511      0.159    0.02578        590        640: 1\n",
      "tensor([2.02694], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.388      0.311      0.292      0.154\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/99      3.85G    0.09481     0.1585     0.0254        650        640: 1\n",
      "tensor([2.10342], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.404      0.317        0.3       0.16\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/99      3.85G    0.09489     0.1575    0.02523        679        640: 1\n",
      "tensor([2.03537], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.406      0.308      0.299      0.159\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/99      3.85G    0.09479     0.1595    0.02531        508        640: 1\n",
      "tensor([1.88700], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.409      0.313      0.303      0.161\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/99      3.85G    0.09459     0.1596    0.02528        765        640: 1\n",
      "tensor([2.00467], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.401       0.32      0.303      0.162\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/99      3.85G     0.0946     0.1591    0.02514        518        640: 1\n",
      "tensor([2.01546], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.401      0.315      0.299      0.161\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/99      3.85G    0.09452      0.158    0.02504        589        640: 1\n",
      "tensor([1.99969], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.413      0.314      0.303      0.163\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/99      3.85G    0.09454     0.1584    0.02492        615        640: 1\n",
      "tensor([2.10705], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.406      0.318      0.303      0.162\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/99      3.85G    0.09461     0.1579    0.02492        603        640: 1\n",
      "tensor([2.09300], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.405      0.319      0.302      0.162\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/99      3.85G    0.09491     0.1576    0.02497        606        640: 1\n",
      "tensor([1.98582], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.423      0.319      0.307      0.166\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/99      3.85G    0.09424     0.1586    0.02476        549        640: 1\n",
      "tensor([1.97324], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.412      0.318      0.307      0.164\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      50/99      3.85G     0.0944     0.1586    0.02479        300        640: 1\n",
      "tensor([1.45487], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.416      0.323      0.309      0.166\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      51/99      3.85G    0.09394     0.1565    0.02477        339        640: 1\n",
      "tensor([1.65680], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.424      0.317      0.306      0.166\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      52/99      3.85G    0.09426     0.1573    0.02468        540        640: 1\n",
      "tensor([1.82079], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.413       0.32      0.306      0.166\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      53/99      3.85G    0.09401     0.1561    0.02453        424        640: 1\n",
      "tensor([1.72692], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.425      0.319      0.311      0.168\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      54/99      3.85G    0.09406     0.1564    0.02446        653        640: 1\n",
      "tensor([2.12143], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.431      0.318       0.31      0.168\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      55/99      3.85G    0.09398     0.1577    0.02447        489        640: 1\n",
      "tensor([1.74511], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.426      0.325      0.313       0.17\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      56/99      3.85G    0.09391     0.1567    0.02431        786        640: 1\n",
      "tensor([2.31134], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.425      0.324      0.312      0.169\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      57/99      3.85G    0.09372     0.1552    0.02442        492        640: 1\n",
      "tensor([1.89747], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.425       0.32       0.31      0.169\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      58/99      3.85G    0.09366     0.1565    0.02404        387        640: 1\n",
      "tensor([1.57968], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.418      0.329      0.314       0.17\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      59/99      3.85G    0.09348     0.1556    0.02409        485        640: 1\n",
      "tensor([1.79061], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.428      0.325      0.314      0.171\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      60/99      3.85G    0.09368     0.1556    0.02433        547        640: 1\n",
      "tensor([1.90844], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.436      0.323      0.318      0.172\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      61/99      3.85G    0.09333     0.1552    0.02414        386        640: 1\n",
      "tensor([1.69783], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.437      0.324      0.316      0.172\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      62/99      3.85G    0.09355      0.157    0.02419        532        640: 1\n",
      "tensor([1.81154], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.433      0.319      0.314      0.171\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      63/99      3.85G     0.0934     0.1542    0.02399        540        640: 1\n",
      "tensor([2.00490], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.422      0.323      0.314      0.172\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      64/99      3.85G    0.09347     0.1552    0.02399        351        640: 1\n",
      "tensor([1.65705], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.427      0.323      0.312       0.17\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      65/99      3.85G    0.09316     0.1554    0.02409        458        640: 1\n",
      "tensor([1.66953], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.436       0.32      0.314      0.172\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      66/99      3.85G    0.09295     0.1531    0.02369        533        640: 1\n",
      "tensor([1.96350], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.43      0.324      0.316      0.173\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      67/99      3.85G    0.09351     0.1548    0.02375        879        640: 1\n",
      "tensor([2.25108], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.436      0.323      0.316      0.173\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      68/99      3.85G    0.09334     0.1553    0.02377        783        640: 1\n",
      "tensor([2.55506], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.433      0.321      0.316      0.173\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      69/99      3.85G    0.09345     0.1549    0.02378        463        640: 1\n",
      "tensor([1.78704], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.432      0.323      0.314      0.172\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      70/99      3.85G      0.093     0.1539    0.02369        319        640: 1\n",
      "tensor([1.58369], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.43      0.325      0.316      0.172\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      71/99      3.85G      0.093     0.1548    0.02366        371        640: 1\n",
      "tensor([1.66658], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.44      0.327      0.318      0.173\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      72/99      3.85G      0.093     0.1532    0.02372        532        640: 1\n",
      "tensor([1.97070], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.433      0.332       0.32      0.174\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      73/99      3.85G    0.09288     0.1533    0.02362        636        640: 1\n",
      "tensor([2.15560], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.437       0.33      0.319      0.174\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      74/99      3.85G    0.09303     0.1541    0.02344        455        640: 1\n",
      "tensor([1.73190], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.44      0.329      0.319      0.174\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      75/99      3.85G    0.09271     0.1552    0.02348        432        640: 1\n",
      "tensor([1.63775], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.439      0.328       0.32      0.175\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      76/99      3.85G    0.09262     0.1518    0.02327       1098        640: 1\n",
      "tensor([2.68848], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.439       0.33      0.319      0.174\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      77/99      3.85G    0.09259     0.1527    0.02329        610        640: 1\n",
      "tensor([1.97092], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.436       0.33      0.321      0.176\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      78/99      3.85G    0.09259     0.1536    0.02335        722        640: 1\n",
      "tensor([2.19839], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.436      0.332       0.32      0.175\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      79/99      3.85G    0.09262      0.153    0.02319        513        640: 1\n",
      "tensor([1.91114], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.436       0.33       0.32      0.175\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      80/99      3.85G    0.09245     0.1516    0.02319        601        640: 1\n",
      "tensor([2.07479], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.441       0.33       0.32      0.175\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      81/99      3.85G    0.09231     0.1524     0.0233        549        640: 1\n",
      "tensor([1.72428], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.442      0.329      0.321      0.176\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      82/99      3.85G    0.09253     0.1524    0.02307        649        640: 1\n",
      "tensor([1.92046], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.451      0.327       0.32      0.175\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      83/99      3.85G    0.09242     0.1522     0.0233        641        640: 1\n",
      "tensor([1.93558], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.444      0.326      0.319      0.174\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      84/99      3.85G    0.09227     0.1521    0.02317        414        640: 1\n",
      "tensor([1.57677], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.441      0.331       0.32      0.175\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      85/99      3.85G    0.09211     0.1506    0.02317        561        640: 1\n",
      "tensor([1.82229], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.442       0.33      0.321      0.175\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      86/99      3.85G    0.09216     0.1515    0.02306        760        640: 1\n",
      "tensor([2.18081], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.435      0.334      0.321      0.176\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      87/99      3.85G    0.09232      0.153    0.02307        471        640: 1\n",
      "tensor([1.70957], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.443      0.334      0.321      0.176\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      88/99      3.85G    0.09212     0.1524    0.02287        307        640: 1\n",
      "tensor([1.51286], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.45      0.328      0.322      0.176\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      89/99      3.85G    0.09221     0.1508    0.02289        555        640: 1\n",
      "tensor([1.86733], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.452      0.325      0.321      0.176\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      90/99      3.85G    0.09204     0.1501    0.02283        595        640: 1\n",
      "tensor([1.89721], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.443       0.33       0.32      0.176\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      91/99      3.85G    0.09251     0.1514    0.02286        365        640: 1\n",
      "tensor([1.59158], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.441      0.329      0.321      0.176\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      92/99      3.85G    0.09174     0.1498    0.02268        450        640: 1\n",
      "tensor([1.91277], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.443      0.329      0.322      0.177\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      93/99      3.85G    0.09217     0.1521    0.02273        363        640: 1\n",
      "tensor([1.50356], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.442       0.33      0.321      0.177\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      94/99      3.85G    0.09201     0.1524    0.02274        682        640: 1\n",
      "tensor([2.12659], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.436      0.331      0.321      0.176\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      95/99      3.85G    0.09163     0.1499    0.02257        348        640: 1\n",
      "tensor([1.47463], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.442      0.332      0.321      0.177\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      96/99      3.85G    0.09157     0.1506    0.02241        801        640: 1\n",
      "tensor([2.10051], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.441      0.332      0.321      0.177\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      97/99      3.85G    0.09176      0.151    0.02258        452        640: 1\n",
      "tensor([1.84477], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.442      0.331      0.322      0.177\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      98/99      3.85G    0.09142     0.1489    0.02245        783        640: 1\n",
      "tensor([2.02330], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.444      0.333      0.322      0.178\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      99/99      3.85G     0.0919     0.1504    0.02262        743        640: 1\n",
      "tensor([2.14061], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.443      0.331      0.321      0.177\n",
      "\n",
      "100 epochs completed in 1.413 hours.\n",
      "Optimizer stripped from runs/train/vis_k_v_o_base/weights/last.pt, 14.5MB\n",
      "Optimizer stripped from runs/train/vis_k_v_o_base/weights/best.pt, 14.5MB\n",
      "\n",
      "Validating runs/train/vis_k_v_o_base/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.438      0.311      0.304      0.169\n",
      "                   car        548      14064      0.617      0.701      0.708      0.473\n",
      "                   van        548       1975      0.439      0.322      0.325      0.219\n",
      "                 truck        548        750      0.486      0.299      0.299      0.188\n",
      "                person        548       5125      0.432      0.273      0.264     0.0927\n",
      "               bicycle        548       1287      0.242      0.134     0.0976     0.0363\n",
      "                   bus        548        251      0.525      0.402      0.407      0.261\n",
      "             motorbike        548       4886      0.449      0.353      0.312      0.122\n",
      "            pedestrian        548       8844      0.448      0.367       0.37      0.158\n",
      "              tricycle        548       1045      0.435      0.159       0.16     0.0828\n",
      "       awning-tricycle        548        532      0.304      0.105      0.097     0.0612\n",
      "Results saved to \u001b[1mruns/train/vis_k_v_o_base\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : vis_k_v_o_base\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/exp-100epoch/fe9df603b7b74b519cb6e73e2ffeada8\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_f1              : 0.15638402187858605\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_false_positives : 128.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_mAP@.5          : 0.09702985298067726\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_mAP@.5:.95      : 0.0611830765916081\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_precision       : 0.3040409779148518\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_recall          : 0.10526315789473684\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_support         : 532\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_true_positives  : 56.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_f1                      : 0.1722678979676735\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_false_positives         : 538.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5                  : 0.09760247872249696\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5:.95              : 0.03628980908363989\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_precision               : 0.24229114392208614\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_recall                  : 0.13364413364413363\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_support                 : 1287\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_true_positives          : 172.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_f1                          : 0.45568206107071363\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_false_positives             : 91.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5                      : 0.40659871603188386\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5:.95                  : 0.26081704956940965\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_precision                   : 0.5252441319034696\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_recall                      : 0.40239043824701193\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_support                     : 251\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_true_positives              : 101.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_f1                          : 0.6560255932873547\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_false_positives             : 6126.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5                      : 0.7080985771500364\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5:.95                  : 0.4732490378818758\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_precision                   : 0.6166867920225847\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_recall                      : 0.7007252559726962\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_support                     : 14064\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_true_positives              : 9855.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [4010]                     : (3.1904220581054688, 6.312022686004639)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [200]           : (0.04595493212473652, 0.32167998989821983)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [200]      : (0.017999785705553967, 0.17760226843165508)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [200]         : (0.03501674362730931, 0.5507045861699263)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [200]            : (0.11407392497019404, 0.33444195667597815)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_f1                    : 0.3950703517765809\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_false_positives       : 2118.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_mAP@.5                : 0.3118397246411623\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_mAP@.5:.95            : 0.12210581650734786\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_precision             : 0.4487760770236445\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_recall                : 0.3528448628735162\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_support               : 4886\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_true_positives        : 1724.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_f1                   : 0.40368875036150664\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_false_positives      : 4004.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_mAP@.5               : 0.36992152021516006\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_mAP@.5:.95           : 0.15770405515009595\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_precision            : 0.4479797771451285\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_recall               : 0.36736770691994575\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_support              : 8844\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_true_positives       : 3249.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_f1                       : 0.3349194880149478\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_false_positives          : 1839.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5                   : 0.2635945829741849\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5:.95               : 0.09271986028520449\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_precision                : 0.4324637205558253\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_recall                   : 0.2732799140604019\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_support                  : 5125\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_true_positives           : 1401.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [200]            : (0.09142343699932098, 0.13379372656345367)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [200]            : (0.022414514794945717, 0.06359588354825974)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [200]            : (0.13454082608222961, 0.17306289076805115)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_f1                     : 0.23273433702204205\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_false_positives        : 216.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_mAP@.5                 : 0.16002743889902243\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_mAP@.5:.95             : 0.08275391132257154\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_precision              : 0.4351026121053559\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_recall                 : 0.15885167464114833\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_support                : 1045\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_true_positives         : 166.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_f1                        : 0.36986466739409224\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_false_positives           : 237.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5                    : 0.29900531683832343\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5:.95                : 0.18807439425441488\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_precision                 : 0.4856328095662875\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_recall                    : 0.2986666666666667\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_support                   : 750\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_true_positives            : 224.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [200]              : (0.09110228717327118, 0.11348856985569)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [200]              : (0.03029155731201172, 0.055173493921756744)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [200]              : (0.2250179648399353, 0.2392815500497818)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_f1                          : 0.3711757872794414\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_false_positives             : 812.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5                      : 0.3245608926010995\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5:.95                  : 0.2192171962412166\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_precision                   : 0.43897264415989184\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_recall                      : 0.32151898734177214\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_support                     : 1975\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_true_positives              : 635.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [200]                     : (0.0002980000000000002, 0.07007407407407407)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [200]                     : (0.0002980000000000002, 0.009793932510288067)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [200]                     : (0.0002980000000000002, 0.009793932510288067)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : vis_k_v_o_base\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/exp-100epoch/fe9df603b7b74b519cb6e73e2ffeada8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     DER_enable          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     DER_old_model       : []\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_enable          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_lambda          : []\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_temperature     : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Old_models          : []\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.225\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/vis_k_v_o_base\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (2.14 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Please wait for assets to finish uploading (timeout is 10800 seconds)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Still uploading 1 file(s), remaining 29.78 KB/333.78 KB\n"
     ]
    }
   ],
   "source": [
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_DER.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_openimages.yaml \\\n",
    "--data data/VisDrone_incremental.yaml \\\n",
    "--epochs 100 \\\n",
    "--weights ./runs/train/k_v_o_replay_DER/weights/last.pt \\\n",
    "--name vis_k_v_o_base \\\n",
    "\"\"\"\n",
    "!{command}\n",
    "#43分钟"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "e864a8a1-dc1f-40ad-b450-ade36b5ccd26",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_VisDrone_incremental.yaml, weights=['runs/train/vis_k_v_o_base/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 7330b3e4 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-test-dev/labe\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1610      75102       0.36      0.288      0.251      0.134\n",
      "                   car       1610      28074      0.555      0.663       0.64      0.378\n",
      "                   van       1610       5771      0.297      0.368      0.278      0.169\n",
      "                 truck       1610       2659      0.354      0.381      0.301      0.168\n",
      "                person       1610       6376      0.345      0.135      0.116      0.036\n",
      "               bicycle       1610       1302      0.207     0.0653     0.0537     0.0196\n",
      "                   bus       1610       2940      0.597      0.501      0.512      0.324\n",
      "             motorbike       1610       5845      0.359      0.261      0.198     0.0684\n",
      "            pedestrian       1610      21006      0.381      0.237      0.225     0.0853\n",
      "              tricycle       1610        530       0.21      0.155     0.0806     0.0342\n",
      "       awning-tricycle       1610        599      0.294      0.116      0.104      0.054\n",
      "Speed: 0.1ms pre-process, 4.1ms inference, 5.9ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp284\u001b[0m\n",
      "Vis\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/openimages.yaml, weights=['runs/train/vis_k_v_o_base/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 7330b3e4 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/test.cache... 600 ima\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        600       1621      0.701     0.0319     0.0334     0.0244\n",
      "                   car        600        113      0.515     0.0354     0.0351     0.0212\n",
      "                   van        600          6          0          0          0          0\n",
      "                 truck        600         17          1          0     0.0191     0.0113\n",
      "                person        600       1131          1          0     0.0148    0.00555\n",
      "               bicycle        600         43          1          0   0.000491   0.000144\n",
      "                  bird        600         61          1          0          0          0\n",
      "                  boat        600         82          1          0          0          0\n",
      "                bottle        600          1          1          0          0          0\n",
      "                   bus        600          3      0.901      0.667      0.665        0.5\n",
      "                   cat        600          5          1          0          0          0\n",
      "                 chair        600         12          1          0          0          0\n",
      "                   dog        600         25          1          0          0          0\n",
      "                 horse        600         37          1          0          0          0\n",
      "                 sheep        600          8          1          0          0          0\n",
      "                 train        600          2          1          0          0          0\n",
      "             billboard        600          3          1          0          0          0\n",
      "                rabbit        600          1          0          0          0          0\n",
      "                monkey        600         16          1          0          0          0\n",
      "                   pig        600          7          0          0          0          0\n",
      "                   toy        600         42          0          0          0          0\n",
      "         traffic light        600          5          0          0          0          0\n",
      "          traffic sign        600          1          0          0          0          0\n",
      "Speed: 0.1ms pre-process, 2.6ms inference, 1.2ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp285\u001b[0m\n",
      "openimages\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_VOC.yaml, weights=['runs/train/vis_k_v_o_base/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 7330b3e4 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 image\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.726      0.014    0.00807    0.00368\n",
      "                   car       4952       1201      0.175      0.188      0.087     0.0425\n",
      "                person       4952       4528      0.222     0.0201     0.0227    0.00913\n",
      "             aeroplane       4952        285          0          0          0          0\n",
      "               bicycle       4952        337     0.0387     0.0119    0.00363   0.000828\n",
      "                  bird       4952        459          1          0          0          0\n",
      "                  boat       4952        263          1          0          0          0\n",
      "                bottle       4952        469          1          0     0.0152    0.00304\n",
      "                   bus       4952        213     0.0661     0.0469      0.031     0.0174\n",
      "                   cat       4952        358          1          0          0          0\n",
      "                 chair       4952        756          1          0          0          0\n",
      "                   cow       4952        244          1          0          0          0\n",
      "           diningtable       4952        206          1          0          0          0\n",
      "                   dog       4952        489          1          0          0          0\n",
      "                 horse       4952        348          1          0          0          0\n",
      "             motorbike       4952        325     0.0232     0.0123    0.00186   0.000773\n",
      "           pottedplant       4952        480          1          0          0          0\n",
      "                 sheep       4952        242          1          0          0          0\n",
      "                  sofa       4952        239          1          0          0          0\n",
      "                 train       4952        282          1          0          0          0\n",
      "             tvmonitor       4952        308          1          0          0          0\n",
      "Speed: 0.1ms pre-process, 1.5ms inference, 0.8ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp286\u001b[0m\n",
      "Voc\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_kitti.yaml, weights=['runs/train/vis_k_v_o_base/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 7330b3e4 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198       0.53     0.0957     0.0975     0.0479\n",
      "                   car       2244       8711       0.71       0.54       0.62      0.309\n",
      "                   van       2244        861      0.145     0.0221     0.0594     0.0276\n",
      "                 truck       2244        333      0.129      0.198     0.0781     0.0405\n",
      "                  tram       2244        138          1          0          0          0\n",
      "                person       2244       1286      0.255    0.00467     0.0227    0.00673\n",
      "        person_sitting       2244         89          1          0          0          0\n",
      "               cyclist       2244        496          1          0          0          0\n",
      "                  misc       2244        284          0          0          0          0\n",
      "Speed: 0.0ms pre-process, 1.0ms inference, 0.9ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp287\u001b[0m\n",
      "kitti\n"
     ]
    }
   ],
   "source": [
    "# 1e-4 1e-3\n",
    "# Evaluate the base checkpoint on all four benchmark test sets.\n",
    "# Each val.py run echoes its dataset label on success, which delimits\n",
    "# the long validation logs when skimming the output.\n",
    "model = 'runs/train/vis_k_v_o_base/weights/last.pt'\n",
    "\n",
    "# (data yaml, label to echo) — the same four datasets are evaluated for every experiment\n",
    "datasets = [\n",
    "    ('data/val_VisDrone_incremental.yaml', 'Vis'),\n",
    "    ('data/openimages.yaml', 'openimages'),\n",
    "    ('data/val_VOC.yaml', 'Voc'),\n",
    "    ('data/val_kitti.yaml', 'kitti'),\n",
    "]\n",
    "\n",
    "for data_yaml, label in datasets:\n",
    "    val_command = (\n",
    "        f\"python val.py \"\n",
    "        f\"--data {data_yaml} \"\n",
    "        f\"--weights {model} \"\n",
    "        f\"--task test && \"\n",
    "        f\"echo '{label}'\"\n",
    "    )\n",
    "    !{val_command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ca95bace-9627-4454-8630-a36d26153935",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "24d04872-6ce3-4df1-b75c-3c97b1145b06",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1ef826e7-33c9-453a-828e-4deb0a77b934",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "704dbe4a-66d6-4cb2-a779-fd0ace35ed69",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_DER: \u001b[0mweights=./runs/train/k_v_o_replay_DER/weights/last.pt, cfg=models/yolov5s_openimages.yaml, data=data/VisDrone_incremental.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=50, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=vis_k_v_o_Lwf, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=True, Lwf_lambda=[0.0001], Lwf_temperature=1.0, Old_models=['./runs/train/k_v_o_replay_DER/weights/last.pt'], DER_enable=False, DER_old_model=[]\n",
      "Command 'git fetch ultralytics' timed out after 5 seconds\n",
      "YOLOv5 🚀 7330b3e4 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/exp-100epoch/f1aecb1c3bf244d5b4d6354840dec0f9\u001b[0m\n",
      "\n",
      "extractors长度： 0\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1    110583  models.yolo_DerTest.Detect              [36, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7116727 parameters, 7116727 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 351/355 items from runs/train/k_v_o_replay_DER/weights/last.pt\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1    110583  models.yolo_DerTest.Detect              [36, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7116727 parameters, 7116727 gradients, 16.2 GFLOPs\n",
      "\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 66 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/labels\u001b[0m\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/0000137_02220_d_0000163.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/0000140_00118_d_0000002.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/9999945_00000_d_0000114.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/9999987_00000_d_0000049.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-val/labels.cac\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m2.95 anchors/target, 0.933 Best Possible Recall (BPR). Anchors are a poor fit to dataset ⚠️, attempting to improve...\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mWARNING ⚠️ Extremely small objects found: 29644 of 343201 labels are <3 pixels in size\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mRunning kmeans for 9 anchors on 342304 points...\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mEvolving anchors with Genetic Algorithm: fitness = 0.7493: 100%|████\u001b[0m\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mthr=0.25: 0.9995 best possible recall, 5.74 anchors past thr\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mn=9, img_size=640, metric_all=0.364/0.748-mean/best, past_thr=0.485-mean: 3,5, 4,9, 8,7, 8,15, 16,9, 16,21, 33,17, 29,37, 61,63\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mDone ✅ (optional: update model *.yaml to use these anchors in the future)\n",
      "Plotting labels to runs/train/vis_k_v_o_Lwf/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/vis_k_v_o_Lwf\u001b[0m\n",
      "Starting training for 50 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/49      3.51G     0.1534    0.09646    0.08283       1697        640:  \u001b[1;38;5;214mCOMET WARNING:\u001b[0m Unknown error retrieving Conda package as an explicit file\n",
      "       0/49      3.51G      0.134     0.1345    0.06896        431        640: 1\n",
      "tensor([2.52467], device='cuda:0', grad_fn=<AddBackward0>) tensor(3356.09204, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   fatal: unable to access 'https://github.com/ultralytics/yolov5/': Failed to connect to github.com port 443 after 130143 ms: Connection timed out\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759     0.0295       0.12     0.0418     0.0163\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/49      3.71G     0.1137     0.1641    0.05442        589        640: 1\n",
      "tensor([2.74115], device='cuda:0', grad_fn=<AddBackward0>) tensor(2851.40332, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.275       0.16     0.0818      0.032\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/49      3.71G     0.1092     0.1705    0.04877        586        640: 1\n",
      "tensor([2.79971], device='cuda:0', grad_fn=<AddBackward0>) tensor(2817.52466, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.323      0.189      0.106     0.0458\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/49      3.71G     0.1053     0.1695    0.04649        785        640: 1\n",
      "tensor([2.61901], device='cuda:0', grad_fn=<AddBackward0>) tensor(2710.27075, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.318      0.205      0.121     0.0561\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/49      3.71G     0.1029     0.1699    0.04504        417        640: 1\n",
      "tensor([2.41824], device='cuda:0', grad_fn=<AddBackward0>) tensor(2544.80640, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.376      0.201      0.131      0.062\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/49      3.71G     0.1015     0.1684    0.04391        276        640: 1\n",
      "tensor([1.95304], device='cuda:0', grad_fn=<AddBackward0>) tensor(2424.38306, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.403      0.201      0.151       0.07\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/49      3.71G      0.101     0.1671    0.04295        436        640: 1\n",
      "tensor([2.35690], device='cuda:0', grad_fn=<AddBackward0>) tensor(2601.12500, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.41      0.203      0.161     0.0761\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/49      3.71G     0.1005     0.1671    0.04221        521        640: 1\n",
      "tensor([2.39524], device='cuda:0', grad_fn=<AddBackward0>) tensor(2570.62524, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.419      0.204      0.169     0.0799\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/49      3.71G    0.09962      0.167    0.04168        326        640: 1\n",
      "tensor([2.01102], device='cuda:0', grad_fn=<AddBackward0>) tensor(2312.47070, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.422      0.217      0.177     0.0845\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/49      3.71G    0.09929      0.166    0.04128        883        640:  "
     ]
    }
   ],
   "source": [
    "# Incremental training with LwF (Learning without Forgetting) regularization:\n",
    "# fine-tune the k_v_o replay-DER checkpoint on VisDrone, with distillation\n",
    "# against the checkpoint passed via --Old_models (same file as --weights here).\n",
    "# Lwf_lambda=1e-4, Lwf_temperature=1.0; results logged under runs/train/vis_k_v_o_Lwf.\n",
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_DER.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_openimages.yaml \\\n",
    "--data data/VisDrone_incremental.yaml \\\n",
    "--epochs 50 \\\n",
    "--weights ./runs/train/k_v_o_replay_DER/weights/last.pt \\\n",
    "--name vis_k_v_o_Lwf \\\n",
    "--Lwf_enable \\\n",
    "--Lwf_temperature 1.0 \\\n",
    "--Lwf_lambda 1e-4 \\\n",
    "--Old_models \\\n",
    "        ./runs/train/k_v_o_replay_DER/weights/last.pt \\\n",
    "\"\"\"\n",
    "!{command}\n",
    "# Started 15:41:07, finished 16:32:53\n",
    "# (19 + 32) / 60 = 0.85 hours wall-clock (~51 min)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "28822fbb-dd2e-4f05-bcd5-38dbb6030ed5",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_VisDrone_incremental.yaml, weights=['runs/train/vis_k_v_o_Lwf/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 7330b3e4 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-test-dev/labe\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1610      75102      0.334       0.26      0.222      0.115\n",
      "                   car       1610      28074      0.538      0.656      0.632      0.368\n",
      "                   van       1610       5771      0.245      0.361      0.238      0.143\n",
      "                 truck       1610       2659      0.266      0.338      0.226      0.119\n",
      "                person       1610       6376      0.329      0.114     0.0909     0.0261\n",
      "               bicycle       1610       1302      0.211     0.0768     0.0559     0.0186\n",
      "                   bus       1610       2940      0.574      0.474      0.464      0.282\n",
      "             motorbike       1610       5845      0.348      0.235      0.184     0.0643\n",
      "            pedestrian       1610      21006      0.386      0.215      0.224     0.0837\n",
      "              tricycle       1610        530      0.142     0.0642     0.0356     0.0164\n",
      "       awning-tricycle       1610        599      0.298     0.0618     0.0661     0.0317\n",
      "Speed: 0.1ms pre-process, 3.8ms inference, 40.3ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp288\u001b[0m\n",
      "Vis\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/openimages.yaml, weights=['runs/train/vis_k_v_o_Lwf/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 7330b3e4 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/test.cache... 600 ima\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        600       1621      0.812     0.0168     0.0215     0.0122\n",
      "                   car        600        113      0.104     0.0354     0.0412     0.0219\n",
      "                   van        600          6          0          0          0          0\n",
      "                 truck        600         17          0          0    0.00175    0.00137\n",
      "                person        600       1131          1          0     0.0297     0.0104\n",
      "               bicycle        600         43          0          0     0.0139    0.00341\n",
      "                  bird        600         61          1          0    0.00832    0.00383\n",
      "                  boat        600         82          1          0    0.00169   0.000445\n",
      "                bottle        600          1          1          0          0          0\n",
      "                   bus        600          3      0.766      0.333      0.374      0.226\n",
      "                   cat        600          5          1          0          0          0\n",
      "                 chair        600         12          1          0          0          0\n",
      "                   dog        600         25          1          0   0.000745   0.000619\n",
      "                 horse        600         37          1          0    0.00105   0.000278\n",
      "                 sheep        600          8          1          0          0          0\n",
      "                 train        600          2          1          0          0          0\n",
      "             billboard        600          3          1          0          0          0\n",
      "                rabbit        600          1          1          0          0          0\n",
      "                monkey        600         16          1          0          0          0\n",
      "                   pig        600          7          1          0          0          0\n",
      "                   toy        600         42          1          0   0.000842   0.000285\n",
      "         traffic light        600          5          1          0          0          0\n",
      "          traffic sign        600          1          1          0          0          0\n",
      "Speed: 0.1ms pre-process, 2.6ms inference, 2.9ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp289\u001b[0m\n",
      "openimages\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_VOC.yaml, weights=['runs/train/vis_k_v_o_Lwf/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 7330b3e4 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 image\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032       0.78     0.0202     0.0112    0.00545\n",
      "                   car       4952       1201      0.192      0.273      0.132     0.0691\n",
      "                person       4952       4528      0.305     0.0444     0.0476     0.0159\n",
      "             aeroplane       4952        285          1          0   0.000939   0.000343\n",
      "               bicycle       4952        337     0.0296     0.0089    0.00317    0.00117\n",
      "                  bird       4952        459          1          0    0.00261    0.00148\n",
      "                  boat       4952        263          1          0   0.000268   7.98e-05\n",
      "                bottle       4952        469          1          0    0.00186   0.000668\n",
      "                   bus       4952        213      0.051     0.0601     0.0259     0.0154\n",
      "                   cat       4952        358          1          0   0.000305   0.000129\n",
      "                 chair       4952        756          1          0    0.00126   0.000574\n",
      "                   cow       4952        244          1          0    0.00101    0.00048\n",
      "           diningtable       4952        206          1          0   5.24e-05   2.62e-05\n",
      "                   dog       4952        489          1          0   0.000541   0.000181\n",
      "                 horse       4952        348          1          0   0.000511    0.00019\n",
      "             motorbike       4952        325     0.0321     0.0185     0.0041     0.0018\n",
      "           pottedplant       4952        480          1          0    0.00022   9.65e-05\n",
      "                 sheep       4952        242          1          0   0.000986   0.000568\n",
      "                  sofa       4952        239          1          0    0.00044   0.000167\n",
      "                 train       4952        282          1          0   0.000165   0.000139\n",
      "             tvmonitor       4952        308          1          0   0.000952   0.000536\n",
      "Speed: 0.1ms pre-process, 1.4ms inference, 1.0ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp290\u001b[0m\n",
      "Voc\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_kitti.yaml, weights=['runs/train/vis_k_v_o_Lwf/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 7330b3e4 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.601      0.154      0.107     0.0521\n",
      "                   car       2244       8711       0.52      0.693      0.649      0.331\n",
      "                   van       2244        861      0.108      0.149     0.0596     0.0283\n",
      "                 truck       2244        333     0.0947      0.384     0.0791     0.0385\n",
      "                  tram       2244        138          1          0    0.00103   0.000552\n",
      "                person       2244       1286     0.0867    0.00933     0.0549     0.0154\n",
      "        person_sitting       2244         89          1          0   0.000193   8.05e-05\n",
      "               cyclist       2244        496          1          0    0.00351    0.00102\n",
      "                  misc       2244        284          1          0     0.0051    0.00202\n",
      "Speed: 0.0ms pre-process, 1.0ms inference, 2.5ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp291\u001b[0m\n",
      "kitti\n"
     ]
    }
   ],
   "source": [
    "# 1e-4 1e-3\n",
    "# Evaluate the LwF checkpoint on all four benchmark test sets.\n",
    "# Each val.py run echoes its dataset label on success, which delimits\n",
    "# the long validation logs when skimming the output.\n",
    "model = 'runs/train/vis_k_v_o_Lwf/weights/last.pt'\n",
    "\n",
    "# (data yaml, label to echo) — the same four datasets are evaluated for every experiment\n",
    "datasets = [\n",
    "    ('data/val_VisDrone_incremental.yaml', 'Vis'),\n",
    "    ('data/openimages.yaml', 'openimages'),\n",
    "    ('data/val_VOC.yaml', 'Voc'),\n",
    "    ('data/val_kitti.yaml', 'kitti'),\n",
    "]\n",
    "\n",
    "for data_yaml, label in datasets:\n",
    "    val_command = (\n",
    "        f\"python val.py \"\n",
    "        f\"--data {data_yaml} \"\n",
    "        f\"--weights {model} \"\n",
    "        f\"--task test && \"\n",
    "        f\"echo '{label}'\"\n",
    "    )\n",
    "    !{val_command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f1bf6a46-9d4a-4cb8-89d7-58817e506325",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "32b29ff2-bcb6-4a4c-9576-b07da3f16221",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a7ffc561-05ee-40af-877f-1517c1c58660",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "41b57513-9bd6-4e55-a7a7-11227b3cdadf",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_DER: \u001b[0mweights=./runs/train/k_v_o_replay_DER/weights/last.pt, cfg=models/yolov5s_openimages.yaml, data=data/kvo_VisDrone_incremental.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=50, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=vis_k_v_o_replay_base, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=False, Lwf_lambda=[], Lwf_temperature=1.0, Old_models=[], DER_enable=False, DER_old_model=[]\n",
      "Command 'git fetch ultralytics' timed out after 5 seconds\n",
      "YOLOv5 🚀 7330b3e4 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/exp-100epoch/0170acb57bf748649ba3dc7273e1e924\u001b[0m\n",
      "\n",
      "extractors长度： 0\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1    110583  models.yolo_DerTest.Detect              [36, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7116727 parameters, 7116727 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 351/355 items from runs/train/k_v_o_replay_DER/weights/last.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 66 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/kitti_old... 11172 images, \u001b[0m\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/0000137_02220_d_0000163.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/0000140_00118_d_0000002.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/9999945_00000_d_0000114.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/9999987_00000_d_0000049.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /root/autodl-tmp/datasets/VOC/labels/kitti_old.cache\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-val/labels.cac\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m3.00 anchors/target, 0.935 Best Possible Recall (BPR). Anchors are a poor fit to dataset ⚠️, attempting to improve...\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mWARNING ⚠️ Extremely small objects found: 29652 of 358162 labels are <3 pixels in size\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mRunning kmeans for 9 anchors on 357265 points...\n",
      "error: RPC failed; curl 16 Error in the HTTP2 framing layer\n",
      "fatal: expected flush after ref listing\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mEvolving anchors with Genetic Algorithm: fitness = 0.6874: 100%|████\u001b[0m\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mthr=0.25: 0.9982 best possible recall, 3.53 anchors past thr\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mn=9, img_size=640, metric_all=0.239/0.687-mean/best, past_thr=0.488-mean: 3,6, 8,8, 10,18, 20,11, 33,29, 109,222, 207,141, 203,318, 893,411\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mDone ✅ (optional: update model *.yaml to use these anchors in the future)\n",
      "Plotting labels to runs/train/vis_k_v_o_replay_base/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/vis_k_v_o_replay_base\u001b[0m\n",
      "Starting training for 50 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/49      3.65G    0.09667    0.08939     0.0607        226        640: 1\n",
      "tensor([0.93890], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.144      0.134     0.0677     0.0283\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/49      3.85G    0.07814    0.08774    0.04544         99        640: 1\n",
      "tensor([0.66990], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.29      0.183     0.0951     0.0432\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/49      3.85G    0.07525    0.08713    0.03893        240        640: 1\n",
      "tensor([0.77063], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.383      0.189      0.125     0.0588\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/49      3.85G    0.07251    0.08755    0.03395        210        640: 1\n",
      "tensor([0.84948], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.443      0.201      0.162     0.0796\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/49      3.85G    0.07123    0.08672     0.0311         58        640: 1\n",
      "tensor([0.54836], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.435      0.209      0.183     0.0911\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/49      3.85G    0.07002    0.08598    0.02935        366        640: 1\n",
      "tensor([0.99784], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.462      0.216      0.197     0.0989\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/49      3.85G    0.06912    0.08547    0.02823        182        640: 1\n",
      "tensor([0.78841], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.402      0.234      0.211      0.108\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/49      3.85G    0.06892    0.08518    0.02733        196        640: 1\n",
      "tensor([0.77298], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.322      0.253      0.224      0.115\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/49      3.85G    0.06837    0.08529    0.02645        294        640: 1\n",
      "tensor([0.86938], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.32      0.265      0.231       0.12\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/49      3.85G     0.0679    0.08518    0.02603        401        640: 1\n",
      "tensor([0.93549], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.328      0.275      0.241      0.125\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/49      3.85G    0.06736    0.08341    0.02542        394        640: 1\n",
      "tensor([0.97919], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.333      0.276      0.241      0.126\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/49      3.85G     0.0669    0.08273    0.02494        159        640: 1\n",
      "tensor([0.63449], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.347      0.281      0.252      0.133\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/49      3.85G    0.06663     0.0834    0.02472        274        640: 1\n",
      "tensor([0.96645], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.349      0.294      0.259      0.134\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/49      3.85G    0.06634    0.08268    0.02413        103        640: 1\n",
      "tensor([0.62992], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.365      0.285      0.263      0.138\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/49      3.85G    0.06604    0.08315    0.02372        102        640: 1\n",
      "tensor([0.52643], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.366      0.296      0.274      0.144\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/49      3.85G    0.06557    0.08265    0.02339        145        640: 1\n",
      "tensor([0.59007], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.367      0.292      0.271      0.142\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/49      3.85G    0.06579    0.08283    0.02317        126        640: 1\n",
      "tensor([0.63169], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.366      0.301      0.276      0.145\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/49      3.85G    0.06514    0.08217    0.02306        288        640: 1\n",
      "tensor([0.75700], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.376      0.295      0.274      0.145\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/49      3.85G    0.06496     0.0819    0.02265        188        640: 1\n",
      "tensor([0.78459], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.386      0.297      0.281      0.148\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/49      3.85G    0.06486    0.08155    0.02246        144        640: 1\n",
      "tensor([0.61505], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.381      0.302      0.284      0.152\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/49      3.85G    0.06473    0.08125    0.02224        213        640: 1\n",
      "tensor([0.71611], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.386      0.301      0.285      0.151\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/49      3.85G    0.06443    0.08122    0.02202         66        640: 1\n",
      "tensor([0.48472], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.378      0.308       0.29      0.156\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/49      3.85G    0.06428    0.08061    0.02175        206        640: 1\n",
      "tensor([0.84008], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.396      0.311      0.294      0.157\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/49      3.85G    0.06393    0.08049    0.02174        245        640: 1\n",
      "tensor([0.73581], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.397      0.317      0.297      0.159\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/49      3.85G    0.06437    0.08025    0.02145        301        640: 1\n",
      "tensor([0.91114], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.39      0.313      0.295      0.157\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/49      3.85G    0.06367    0.07995    0.02112        213        640: 1\n",
      "tensor([0.73713], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.402      0.311      0.299       0.16\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/49      3.85G    0.06334    0.07882    0.02106        255        640: 1\n",
      "tensor([0.75425], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.397      0.312      0.297      0.159\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/49      3.85G    0.06363    0.08082     0.0209        129        640: 1\n",
      "tensor([0.63408], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.401      0.315      0.301      0.161\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/49      3.85G    0.06296    0.07948    0.02066        142        640: 1\n",
      "tensor([0.48768], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.409      0.319      0.304      0.164\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/49      3.85G    0.06322    0.08022    0.02058         96        640: 1\n",
      "tensor([0.56470], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.405      0.319      0.305      0.164\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/49      3.85G    0.06308    0.07993    0.02043        183        640: 1\n",
      "tensor([0.71461], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.407      0.321      0.308      0.166\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/49      3.85G    0.06255    0.07938     0.0202        165        640: 1\n",
      "tensor([0.63646], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.405      0.322       0.31      0.168\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/49      3.85G    0.06256    0.08014     0.0199         92        640: 1\n",
      "tensor([0.52381], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.409       0.32      0.308      0.167\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/49      3.85G    0.06262    0.07948    0.01985        221        640: 1\n",
      "tensor([0.75901], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.417      0.325      0.312      0.169\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/49      3.85G    0.06214    0.07881    0.01964        343        640: 1\n",
      "tensor([0.69277], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.425      0.323      0.316      0.171\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/49      3.85G    0.06185    0.07891    0.01954        415        640: 1\n",
      "tensor([0.87304], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.42      0.325      0.316      0.172\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/49      3.85G     0.0618    0.07856    0.01946         80        640: 1\n",
      "tensor([0.52340], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.418      0.328      0.317      0.173\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/49      3.85G    0.06139    0.07837    0.01916        170        640: 1\n",
      "tensor([0.60243], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.42      0.333       0.32      0.175\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/49      3.85G     0.0616    0.07946    0.01919        404        640: 1\n",
      "tensor([0.73409], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.425      0.331      0.321      0.176\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/49      3.85G    0.06145    0.07782    0.01908        275        640: 1\n",
      "tensor([0.82493], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.428      0.327      0.319      0.175\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/49      3.85G    0.06146    0.07814    0.01899        219        640: 1\n",
      "tensor([0.70932], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.425      0.327      0.319      0.175\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/49      3.85G    0.06105     0.0781     0.0188        179        640: 1\n",
      "tensor([0.60301], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.419      0.332      0.321      0.175\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/49      3.85G    0.06084    0.07672    0.01864         44        640: 1\n",
      "tensor([0.40088], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.426      0.329      0.321      0.175\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/49      3.85G    0.06095    0.07843    0.01855         32        640: 1\n",
      "tensor([0.37256], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.422      0.332       0.32      0.175\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/49      3.85G    0.06058    0.07769    0.01847        240        640: 1\n",
      "tensor([0.73516], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.426      0.328      0.322      0.177\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/49      3.85G    0.06037    0.07688    0.01816        229        640: 1\n",
      "tensor([0.67871], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.437      0.328      0.322      0.176\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/49      3.85G    0.06031     0.0773    0.01824        617        640: 1\n",
      "tensor([1.13187], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.429      0.333      0.323      0.177\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/49      3.85G    0.05999    0.07624    0.01809         53        640: 1\n",
      "tensor([0.31618], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.426      0.334      0.323      0.177\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/49      3.85G    0.05997    0.07593    0.01804        157        640: 1\n",
      "tensor([0.56265], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.429      0.333      0.323      0.178\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/49      3.85G    0.05963    0.07608    0.01771         38        640: 1\n",
      "tensor([0.40289], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.432      0.332      0.324      0.178\n",
      "\n",
      "50 epochs completed in 1.022 hours.\n",
      "Optimizer stripped from runs/train/vis_k_v_o_replay_base/weights/last.pt, 14.5MB\n",
      "Optimizer stripped from runs/train/vis_k_v_o_replay_base/weights/best.pt, 14.5MB\n",
      "\n",
      "Validating runs/train/vis_k_v_o_replay_base/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.423      0.316      0.309      0.172\n",
      "                   car        548      14064       0.62      0.716      0.719      0.477\n",
      "                   van        548       1975      0.418      0.345      0.329       0.22\n",
      "                 truck        548        750      0.443      0.307      0.307      0.194\n",
      "                person        548       5125      0.418      0.281      0.264     0.0905\n",
      "               bicycle        548       1287      0.213       0.14     0.0944     0.0342\n",
      "                   bus        548        251      0.515      0.398      0.417      0.277\n",
      "             motorbike        548       4886      0.448      0.361      0.322      0.123\n",
      "            pedestrian        548       8844      0.461      0.364      0.365      0.151\n",
      "              tricycle        548       1045      0.413      0.145      0.164     0.0847\n",
      "       awning-tricycle        548        532      0.279      0.105      0.105     0.0645\n",
      "Results saved to \u001b[1mruns/train/vis_k_v_o_replay_base\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : vis_k_v_o_replay_base\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/exp-100epoch/0170acb57bf748649ba3dc7273e1e924\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_f1              : 0.152915342863394\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_false_positives : 144.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_mAP@.5          : 0.10535045499950535\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_mAP@.5:.95      : 0.0644892886059124\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_precision       : 0.27939732767593467\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_recall          : 0.10526315789473684\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_support         : 532\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_true_positives  : 56.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_f1                      : 0.169001151447685\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_false_positives         : 663.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5                  : 0.0944320752690865\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5:.95              : 0.03423976492728645\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_precision               : 0.21348185310675405\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_recall                  : 0.13986013986013987\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_support                 : 1287\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_true_positives          : 180.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_f1                          : 0.44919653049034786\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_false_positives             : 94.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5                      : 0.41676530413440194\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5:.95                  : 0.27683632614675424\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_precision                   : 0.5148285713386896\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_recall                      : 0.398406374501992\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_support                     : 251\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_true_positives              : 100.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_f1                          : 0.6646601590524334\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_false_positives             : 6159.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5                      : 0.7194331925896753\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5:.95                  : 0.4768440000136237\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_precision                   : 0.6203943777942403\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_recall                      : 0.7157281001137656\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_support                     : 14064\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_true_positives              : 10066.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [3495]                     : (0.4876781702041626, 5.200316905975342)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [100]           : (0.0676795008275149, 0.3242126119540646)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [100]      : (0.028321048591961006, 0.17830563611251876)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [100]         : (0.14353609277129914, 0.4621322752011866)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [100]            : (0.13445614840183168, 0.3339311937738232)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_f1                    : 0.3997203713979636\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_false_positives       : 2168.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_mAP@.5                : 0.32150698797152005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_mAP@.5:.95            : 0.12302678277568735\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_precision             : 0.4483274465772817\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_recall                : 0.36062218583708555\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_support               : 4886\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_true_positives        : 1762.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_f1                   : 0.40659755066534137\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_false_positives      : 3755.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_mAP@.5               : 0.36517726011178464\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_mAP@.5:.95           : 0.15075788543875424\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_precision            : 0.46125175749483216\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_recall               : 0.3635232926277702\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_support              : 8844\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_true_positives       : 3215.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_f1                       : 0.33573957891517786\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_false_positives          : 2003.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5                   : 0.2642572080156737\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5:.95               : 0.09050451323439548\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_precision                : 0.41788196269541217\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_recall                   : 0.28058536585365856\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_support                  : 5125\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_true_positives           : 1438.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [100]            : (0.05963350087404251, 0.09667365252971649)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [100]            : (0.01771022006869316, 0.060699835419654846)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [100]            : (0.0759272649884224, 0.08939027041196823)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_f1                     : 0.21510089947100983\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_false_positives        : 216.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_mAP@.5                 : 0.16391714408339564\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_mAP@.5:.95             : 0.08469404640586012\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_precision              : 0.41271797896635887\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_recall                 : 0.14545454545454545\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_support                : 1045\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_true_positives         : 152.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_f1                        : 0.3625339508908914\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_false_positives           : 289.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5                    : 0.3070314924212002\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5:.95                : 0.19439098438621782\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_precision                 : 0.44329087731594286\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_recall                    : 0.30666666666666664\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_support                   : 750\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_true_positives            : 230.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [100]              : (0.05832437053322792, 0.07856129854917526)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [100]              : (0.024260997772216797, 0.04669227823615074)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [100]              : (0.1717180609703064, 0.18536090850830078)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_f1                          : 0.37822693916728206\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_false_positives             : 950.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5                      : 0.3287093383360812\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5:.95                  : 0.22037381709759601\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_precision                   : 0.41795031886947404\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_recall                      : 0.3453990699560319\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_support                     : 1975\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_true_positives              : 682.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [100]                     : (0.0004960000000000005, 0.07004291845493563)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [100]                     : (0.0004960000000000005, 0.009599420123986648)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [100]                     : (0.0004960000000000005, 0.009599420123986648)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : vis_k_v_o_replay_base\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/exp-100epoch/0170acb57bf748649ba3dc7273e1e924\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     DER_enable          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     DER_old_model       : []\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_enable          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_lambda          : []\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_temperature     : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Old_models          : []\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.225\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/vis_k_v_o_replay_base\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (2.22 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Please wait for assets to finish uploading (timeout is 10800 seconds)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m All assets have been sent, waiting for delivery confirmation\n"
     ]
    }
   ],
   "source": [
    "# Launch incremental training (train_DER.py) on the kvo+VisDrone split,\n",
    "# starting from the previous replay checkpoint; COMET_LOG_PER_CLASS_METRICS\n",
    "# enables per-class P/R/mAP logging in the Comet experiment summary.\n",
    "command = \"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_DER.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_openimages.yaml \\\n",
    "--data data/kvo_VisDrone_incremental.yaml \\\n",
    "--epochs 50 \\\n",
    "--weights ./runs/train/k_v_o_replay_DER/weights/last.pt \\\n",
    "--name vis_k_v_o_replay_base \\\n",
    "\"\"\"\n",
    "!{command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "3571abd4-f451-44f5-b87b-a16b040fc127",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_VisDrone_incremental.yaml, weights=['runs/train/vis_k_v_o_replay_base/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 7330b3e4 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-test-dev/labe\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1610      75102      0.365       0.29      0.256      0.135\n",
      "                   car       1610      28074      0.577      0.685      0.658      0.386\n",
      "                   van       1610       5771      0.293      0.386      0.283      0.172\n",
      "                 truck       1610       2659       0.34      0.369      0.303      0.165\n",
      "                person       1610       6376      0.332      0.129      0.106      0.031\n",
      "               bicycle       1610       1302      0.195      0.086     0.0628     0.0225\n",
      "                   bus       1610       2940        0.6      0.512      0.518      0.322\n",
      "             motorbike       1610       5845      0.377       0.26      0.206     0.0707\n",
      "            pedestrian       1610      21006      0.407      0.231      0.225     0.0831\n",
      "              tricycle       1610        530      0.235      0.142      0.103      0.049\n",
      "       awning-tricycle       1610        599      0.292      0.103     0.0947      0.049\n",
      "Speed: 0.1ms pre-process, 4.5ms inference, 5.7ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp292\u001b[0m\n",
      "Vis\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/openimages.yaml, weights=['runs/train/vis_k_v_o_replay_base/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 7330b3e4 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/test.cache... 600 ima\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        600       1621      0.572      0.367      0.383      0.267\n",
      "                   car        600        113      0.548      0.451      0.451      0.319\n",
      "                   van        600          6          0          0     0.0213     0.0152\n",
      "                 truck        600         17      0.825      0.279        0.5      0.401\n",
      "                person        600       1131      0.545      0.248      0.308      0.153\n",
      "               bicycle        600         43      0.712      0.372      0.457      0.291\n",
      "                  bird        600         61      0.692      0.588      0.569      0.371\n",
      "                  boat        600         82      0.725      0.317      0.482      0.296\n",
      "                bottle        600          1          0          0     0.0163    0.00816\n",
      "                   bus        600          3      0.491          1      0.913      0.797\n",
      "                   cat        600          5       0.57      0.541      0.491      0.281\n",
      "                 chair        600         12      0.463      0.333      0.361      0.229\n",
      "                   dog        600         25      0.667       0.76      0.704      0.506\n",
      "                 horse        600         37       0.66      0.649      0.657      0.366\n",
      "                 sheep        600          8      0.542      0.595      0.548      0.422\n",
      "                 train        600          2          0          0     0.0086    0.00688\n",
      "             billboard        600          3          1          0          0          0\n",
      "                rabbit        600          1      0.325          1      0.497      0.448\n",
      "                monkey        600         16      0.812      0.541      0.713      0.388\n",
      "                   pig        600          7          1      0.409      0.684      0.539\n",
      "                   toy        600         42          0          0     0.0534     0.0267\n",
      "         traffic light        600          5          1          0          0          0\n",
      "          traffic sign        600          1          1          0          0          0\n",
      "Speed: 0.1ms pre-process, 2.9ms inference, 1.1ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp293\u001b[0m\n",
      "openimages\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_VOC.yaml, weights=['runs/train/vis_k_v_o_replay_base/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 7330b3e4 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 image\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.759      0.636      0.685      0.436\n",
      "                   car       4952       1201      0.814      0.827      0.866      0.649\n",
      "                person       4952       4528      0.821      0.732      0.806      0.484\n",
      "             aeroplane       4952        285      0.818      0.536      0.651      0.396\n",
      "               bicycle       4952        337       0.82      0.745      0.802      0.521\n",
      "                  bird       4952        459      0.763      0.575      0.647       0.38\n",
      "                  boat       4952        263      0.644      0.494      0.532       0.29\n",
      "                bottle       4952        469      0.727       0.42      0.468      0.276\n",
      "                   bus       4952        213      0.839      0.732        0.8       0.63\n",
      "                   cat       4952        358      0.794       0.74      0.783      0.506\n",
      "                 chair       4952        756      0.703      0.426      0.504      0.284\n",
      "                   cow       4952        244      0.724      0.717      0.766      0.506\n",
      "           diningtable       4952        206      0.714      0.539      0.576       0.35\n",
      "                   dog       4952        489      0.803      0.635      0.716      0.438\n",
      "                 horse       4952        348      0.878      0.785      0.837      0.557\n",
      "             motorbike       4952        325      0.852      0.708      0.778      0.483\n",
      "           pottedplant       4952        480      0.642      0.416      0.448      0.208\n",
      "                 sheep       4952        242      0.594      0.707      0.642      0.428\n",
      "                  sofa       4952        239      0.669      0.611      0.647      0.424\n",
      "                 train       4952        282      0.848      0.732      0.774      0.488\n",
      "             tvmonitor       4952        308      0.718      0.652       0.66       0.43\n",
      "Speed: 0.1ms pre-process, 1.4ms inference, 0.7ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp294\u001b[0m\n",
      "Voc\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_kitti.yaml, weights=['runs/train/vis_k_v_o_replay_base/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 7330b3e4 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.544      0.527      0.548       0.32\n",
      "                   car       2244       8711      0.754      0.877      0.894        0.6\n",
      "                   van       2244        861      0.616      0.542      0.563      0.364\n",
      "                 truck       2244        333      0.738      0.727      0.787      0.551\n",
      "                  tram       2244        138      0.744      0.758       0.77      0.386\n",
      "                person       2244       1286        0.6      0.611      0.631      0.295\n",
      "        person_sitting       2244         89          0          0     0.0451     0.0178\n",
      "               cyclist       2244        496      0.507      0.575      0.528      0.258\n",
      "                  misc       2244        284      0.391      0.129      0.165      0.088\n",
      "Speed: 0.0ms pre-process, 1.0ms inference, 0.8ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp295\u001b[0m\n",
      "kitti\n"
     ]
    }
   ],
   "source": [
    "# 1e-4 1e-3\n",
    "# Checkpoint under evaluation (same weights validated on every dataset below).\n",
    "model = 'runs/train/vis_k_v_o_replay_base/weights/last.pt'\n",
    "\n",
    "# (data yaml, label) pairs. The `echo '<label>'` after each val.py run tags\n",
    "# which dataset the preceding results block in the output belongs to.\n",
    "datasets = [\n",
    "    ('data/val_VisDrone_incremental.yaml', 'Vis'),\n",
    "    ('data/openimages.yaml', 'openimages'),\n",
    "    ('data/val_VOC.yaml', 'Voc'),\n",
    "    ('data/val_kitti.yaml', 'kitti'),\n",
    "]\n",
    "\n",
    "for data_yaml, label in datasets:\n",
    "    # Adjacent-string concatenation avoids the fragile trailing-backslash\n",
    "    # continuations of the original (a stray space after '\\\\' breaks the command).\n",
    "    val_command = (\n",
    "        f\"python val.py \"\n",
    "        f\"--data {data_yaml} \"\n",
    "        f\"--weights {model} \"\n",
    "        f\"--task test && \"\n",
    "        f\"echo '{label}'\"\n",
    "    )\n",
    "    !{val_command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "327829b8-f291-4539-bccc-f7f6528cdb08",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7d9298b8-655d-4d6a-ab4a-932d6f15b4bd",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "40fd6ed1-b628-4053-b553-f8e68395fab9",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "b6d6d92d-83f0-4e18-8556-741c5dfcbd7a",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_DER: \u001b[0mweights=./runs/train/k_v_o_replay_DER/weights/last.pt, cfg=models/yolov5s_openimages.yaml, data=data/kvo_VisDrone_incremental.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=50, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=vis_k_v_o_replay_DER, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=False, Lwf_lambda=[], Lwf_temperature=1.0, Old_models=[], DER_enable=True, DER_old_model=['./runs/train/fog_02/weights/last.pt', './runs/train/der_replay/weights/last.pt', './runs/train/k_v_o_replay_DER/weights/last.pt']\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2895 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 7330b3e4 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/exp-100epoch/72a428bf740141589527a88c0f84c44c\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35067  models.yolo_DerTest.Detect              [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "Model summary: 217 layers, 7041211 parameters, 7041211 gradients, 16.0 GFLOPs\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83613  models.yolo_DerTest.Detect              [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "Model summary: 217 layers, 7089757 parameters, 7089757 gradients, 16.2 GFLOPs\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1    110583  models.yolo_DerTest.Detect              [36, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "Model summary: 217 layers, 7116727 parameters, 7116727 gradients, 16.2 GFLOPs\n",
      "\n",
      "extractors长度： 3\n",
      "首次创建 extractors\n",
      "成功拼接 extractors\n",
      "extractors共有模型个数： 4\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1    441207  models.yolo_DerTest.Detect              [36, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [512, 1024, 2048]]\n",
      "已知类别： 36\n",
      "YOLOv5s_openimages summary: 871 layers, 28711234 parameters, 7463539 gradients, 117.0 GFLOPs\n",
      "\n",
      "Transferred 1416/1788 items from runs/train/k_v_o_replay_DER/weights/last.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 228 weight(decay=0.0), 273 weight(decay=0.0005), 243 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/kitti_old.cache... 11172 im\u001b[0m\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/0000137_02220_d_0000163.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/0000140_00118_d_0000002.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/9999945_00000_d_0000114.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/9999987_00000_d_0000049.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-val/labels.cac\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m3.00 anchors/target, 0.935 Best Possible Recall (BPR). Anchors are a poor fit to dataset ⚠️, attempting to improve...\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mWARNING ⚠️ Extremely small objects found: 29652 of 358162 labels are <3 pixels in size\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mRunning kmeans for 9 anchors on 357265 points...\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mEvolving anchors with Genetic Algorithm: fitness = 0.6874: 100%|████\u001b[0m\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mthr=0.25: 0.9982 best possible recall, 3.53 anchors past thr\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mn=9, img_size=640, metric_all=0.239/0.687-mean/best, past_thr=0.488-mean: 3,6, 8,8, 10,18, 20,11, 33,29, 109,222, 207,141, 203,318, 893,411\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mDone ✅ (optional: update model *.yaml to use these anchors in the future)\n",
      "Plotting labels to runs/train/vis_k_v_o_replay_DER/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/vis_k_v_o_replay_DER\u001b[0m\n",
      "Starting training for 50 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/49      11.9G    0.05614    0.05155    0.03273        226        640: 1\n",
      "tensor([1.67490], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.404      0.149     0.0776     0.0322\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/49      12.1G    0.04877    0.05085    0.02186         99        640: 1\n",
      "tensor([1.12404], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.377      0.185      0.107     0.0489\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/49      12.1G     0.0469    0.05001    0.01896        240        640: 1\n",
      "tensor([1.32775], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.434      0.186      0.139     0.0597\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/49      12.1G    0.04239    0.04919    0.01561        366        640: 1\n",
      "tensor([1.78572], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.365      0.266      0.245      0.126\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/49      12.1G     0.0418    0.04922    0.01528        182        640: 1\n",
      "tensor([1.35140], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.331      0.279      0.249      0.127\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/49      12.1G    0.04115    0.04859    0.01455        196        640: 1\n",
      "tensor([1.36543], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.344       0.29      0.258      0.132\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/49      12.1G    0.04027    0.04839     0.0139        401        640: 1\n",
      "tensor([1.72939], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.374      0.295      0.273      0.139\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/49      12.1G    0.04026    0.04765    0.01383        394        640: 1\n",
      "tensor([1.82599], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.385      0.296      0.281      0.145\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/49      12.1G    0.04007    0.04751    0.01362        159        640: 1\n",
      "tensor([1.15792], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.397      0.291      0.286      0.148\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/49      12.1G    0.03952    0.04795     0.0133        274        640: 1\n",
      "tensor([1.77438], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.403      0.306      0.288      0.149\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/49      12.1G    0.03967     0.0474    0.01326        103        640: 1\n",
      "tensor([1.07894], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.403      0.308      0.296      0.156\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/49      12.1G    0.03906    0.04733    0.01288        102        640: 1\n",
      "tensor([0.92176], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.41      0.313      0.303       0.16\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/49      12.1G    0.03914    0.04723    0.01274        145        640: 1\n",
      "tensor([1.08617], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.406      0.309      0.299      0.156\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/49      12.1G    0.03881    0.04675    0.01239        126        640: 1\n",
      "tensor([1.12531], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.413      0.311      0.305      0.162\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/49      12.1G    0.03874    0.04658    0.01244        288        640: 1\n",
      "tensor([1.35829], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.418      0.311      0.305      0.163\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/49      12.1G    0.03858     0.0466    0.01227        188        640: 1\n",
      "tensor([1.37285], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.419      0.317      0.311      0.168\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/49      12.1G    0.03833    0.04609    0.01209        144        640: 1\n",
      "tensor([1.06467], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.407      0.328      0.315      0.169\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/49      12.1G    0.03844    0.04624    0.01203        213        640: 1\n",
      "tensor([1.33201], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.418      0.322      0.313      0.168\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/49      12.1G    0.03837    0.04563    0.01205         66        640: 1\n",
      "tensor([0.81865], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.403      0.328      0.312      0.168\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/49      12.1G     0.0383    0.04615    0.01178        206        640: 1\n",
      "tensor([1.44864], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.424      0.323      0.315      0.171\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/49      12.1G    0.03791     0.0458    0.01168        245        640: 1\n",
      "tensor([1.35389], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.428       0.33      0.321      0.175\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/49      12.1G    0.03787    0.04542    0.01142        301        640: 1\n",
      "tensor([1.68550], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.424      0.323      0.316      0.171\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/49      12.1G    0.03764    0.04522    0.01131        213        640: 1\n",
      "tensor([1.34990], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.427      0.333      0.326      0.177\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/49      12.1G    0.03797      0.045    0.01146        255        640: 1\n",
      "tensor([1.40192], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.446      0.331      0.331      0.179\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/49      12.1G    0.03727    0.04539    0.01115        129        640: 1\n",
      "tensor([1.16284], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.437      0.334      0.328      0.178\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/49      12.1G     0.0372    0.04496    0.01098        142        640: 1\n",
      "tensor([0.90214], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.436      0.344      0.333      0.182\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/49      12.1G    0.03713     0.0448    0.01095         96        640: 1\n",
      "tensor([0.96581], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.446      0.333      0.332      0.181\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/49      12.1G    0.03691     0.0448    0.01074        183        640: 1\n",
      "tensor([1.27540], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.444       0.34      0.332      0.181\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/49      12.1G    0.03696    0.04479    0.01071        165        640: 1\n",
      "tensor([1.15136], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.446       0.34      0.335      0.183\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/49      12.1G     0.0365    0.04476    0.01045         92        640: 1\n",
      "tensor([0.83506], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.452      0.343      0.338      0.184\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/49      12.1G    0.03647    0.04446    0.01029        221        640: 1\n",
      "tensor([1.44867], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.456      0.342      0.339      0.186\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/49      12.1G    0.03668    0.04438    0.01029        343        640: 1\n",
      "tensor([1.33228], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.444      0.344      0.338      0.186\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/49      12.1G     0.0363    0.04435    0.01024        415        640: 1\n",
      "tensor([1.61251], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.453      0.344       0.34      0.186\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/49      12.1G    0.03651    0.04403    0.01026         80        640: 1\n",
      "tensor([0.89313], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.448       0.34      0.336      0.184\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/49      12.1G    0.03608    0.04398   0.009928        170        640: 1\n",
      "tensor([1.11039], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.443      0.347      0.342      0.187\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/49      12.1G    0.03578    0.04405    0.00988        404        640: 1\n",
      "tensor([1.39943], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.44      0.345      0.341      0.188\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/49      12.1G    0.03614    0.04373   0.009941        275        640: 1\n",
      "tensor([1.55015], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.45      0.343      0.342      0.189\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/49      12.1G    0.03582    0.04348   0.009703        219        640: 1\n",
      "tensor([1.29060], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.445      0.347      0.342      0.189\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/49      12.1G    0.03569     0.0437   0.009727        179        640: 1\n",
      "tensor([1.10447], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.454      0.343      0.342      0.188\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/49      12.1G    0.03591    0.04272   0.009776         44        640: 1\n",
      "tensor([0.59808], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.453      0.349      0.345       0.19\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/49      12.1G     0.0359    0.04245   0.009514         32        640: 1\n",
      "tensor([0.68967], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.447      0.346      0.345       0.19\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/49      12.1G    0.03527    0.04324   0.009401        240        640: 1\n",
      "tensor([1.32427], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.444      0.349      0.346      0.191\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/49      12.1G    0.03529    0.04287   0.009307        229        640: 1\n",
      "tensor([1.23310], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.442      0.352      0.345      0.191\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/49      12.1G    0.03506    0.04286   0.009244        617        640: 1\n",
      "tensor([2.12840], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.444      0.352      0.345      0.191\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/49      12.1G    0.03536    0.04234   0.009293         53        640: 1\n",
      "tensor([0.59708], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.454      0.349      0.345      0.191\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/49      12.1G    0.03519    0.04239   0.009166        157        640: 1\n",
      "tensor([1.04205], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.447      0.351      0.343       0.19\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/49      12.1G    0.03543    0.04188   0.009213         38        640: 1\n",
      "tensor([0.69387], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.447      0.353      0.344      0.191\n",
      "\n",
      "50 epochs completed in 3.023 hours.\n",
      "Optimizer stripped from runs/train/vis_k_v_o_replay_DER/weights/last.pt, 58.3MB\n",
      "Optimizer stripped from runs/train/vis_k_v_o_replay_DER/weights/best.pt, 58.3MB\n",
      "\n",
      "Validating runs/train/vis_k_v_o_replay_DER/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 814 layers, 28701730 parameters, 0 gradients, 116.2 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "Traceback (most recent call last):\n",
      "  File \"/root/autodl-tmp/yolo_incremental_learning/train_DER.py\", line 1242, in <module>\n",
      "    main(opt)\n",
      "  File \"/root/autodl-tmp/yolo_incremental_learning/train_DER.py\", line 944, in main\n",
      "    train(opt.hyp, opt, device, callbacks)\n",
      "  File \"/root/autodl-tmp/yolo_incremental_learning/train_DER.py\", line 754, in train\n",
      "    results, _, _ = validate.run(\n",
      "  File \"/root/miniconda3/lib/python3.10/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n",
      "    return func(*args, **kwargs)\n",
      "  File \"/root/autodl-tmp/yolo_incremental_learning/val.py\", line 345, in run\n",
      "    preds, train_out = model(im) if compute_loss else (model(im, augment=augment), None)\n",
      "  File \"/root/miniconda3/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n",
      "    return self._call_impl(*args, **kwargs)\n",
      "  File \"/root/miniconda3/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n",
      "    return forward_call(*args, **kwargs)\n",
      "  File \"/root/autodl-tmp/yolo_incremental_learning/models/yolo_DerTest.py\", line 423, in forward\n",
      "    aux_out = self.aux_head(aux_features)\n",
      "  File \"/root/miniconda3/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n",
      "    return self._call_impl(*args, **kwargs)\n",
      "  File \"/root/miniconda3/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n",
      "    return forward_call(*args, **kwargs)\n",
      "  File \"/root/autodl-tmp/yolo_incremental_learning/models/yolo_DerTest.py\", line 126, in forward\n",
      "    xy = (xy * 2 + self.grid[i]) * self.stride[i]  # xy\n",
      "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : vis_k_v_o_replay_DER\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/exp-100epoch/72a428bf740141589527a88c0f84c44c\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [3495]                : (0.9021357297897339, 8.91825008392334)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [100]      : (0.07763058737921652, 0.34561319595748907)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [100] : (0.032201156253679294, 0.19089738306134646)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [100]    : (0.3313060298683953, 0.5459174368116121)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [100]       : (0.14894199130488195, 0.35266624546088077)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [100]       : (0.035056084394454956, 0.056140754371881485)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [100]       : (0.009166005998849869, 0.03272948041558266)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [100]       : (0.041880857199430466, 0.0515454076230526)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [100]         : (0.05785773694515228, 0.08104702830314636)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [100]         : (0.023925302550196648, 0.04234657809138298)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [100]         : (0.16944313049316406, 0.18706682324409485)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [100]                : (0.0004960000000000005, 0.07004291845493563)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [100]                : (0.0004960000000000005, 0.009599420123986648)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [100]                : (0.0004960000000000005, 0.009599420123986648)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : vis_k_v_o_replay_DER\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/exp-100epoch/72a428bf740141589527a88c0f84c44c\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     DER_enable          : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     DER_old_model       : ['./runs/train/fog_02/weights/last.pt', './runs/train/der_replay/weights/last.pt', './runs/train/k_v_o_replay_DER/weights/last.pt']\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_enable          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_lambda          : []\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_temperature     : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Old_models          : []\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bbox_interval       : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cfg                 : models/yolov5s_openimages.yaml\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.225\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     data                : data/kvo_VisDrone_incremental.yaml\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     epochs              : 50\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                : vis_k_v_o_replay_DER\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/vis_k_v_o_replay_DER\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weights             : ./runs/train/k_v_o_replay_DER/weights/last.pt\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 5 (382.41 KB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     model graph                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n"
     ]
    }
   ],
   "source": [
    "# Launch incremental YOLOv5 training with DER enabled, replaying three\n",
    "# previously trained checkpoints via --DER_old_model.\n",
    "# NOTE: inside a triple-quoted string, a trailing backslash-newline is a\n",
    "# Python line continuation, so `command` collapses to a single shell line.\n",
    "# Plain string literal: the former f-string prefix had no {} placeholders.\n",
    "command = \"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_DER.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_openimages.yaml \\\n",
    "--data data/kvo_VisDrone_incremental.yaml \\\n",
    "--epochs 50 \\\n",
    "--weights ./runs/train/k_v_o_replay_DER/weights/last.pt \\\n",
    "--name vis_k_v_o_replay_DER \\\n",
    "--DER_enable \\\n",
    "--DER_old_model \\\n",
    "   ./runs/train/fog_02/weights/last.pt \\\n",
    "   ./runs/train/der_replay/weights/last.pt \\\n",
    "   ./runs/train/k_v_o_replay_DER/weights/last.pt \\\n",
    "\"\"\"\n",
    "!{command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "dd9ac2a3-ff1b-4e04-a2f5-609dcf156d66",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_VisDrone_incremental.yaml, weights=['runs/train/vis_k_v_o_replay_DER/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 7330b3e4 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 814 layers, 28701730 parameters, 0 gradients, 116.2 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-test-dev/labe\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1610      75102      0.386      0.317      0.283       0.15\n",
      "                   car       1610      28074      0.603       0.72      0.699      0.418\n",
      "                   van       1610       5771      0.361      0.372      0.322      0.196\n",
      "                 truck       1610       2659      0.343      0.391      0.318       0.18\n",
      "                person       1610       6376      0.387      0.168      0.147     0.0457\n",
      "               bicycle       1610       1302      0.217     0.0952     0.0752     0.0242\n",
      "                   bus       1610       2940      0.593      0.505       0.52      0.332\n",
      "             motorbike       1610       5845      0.377      0.293      0.229     0.0801\n",
      "            pedestrian       1610      21006      0.424      0.248      0.244     0.0918\n",
      "              tricycle       1610        530      0.231      0.213      0.146     0.0722\n",
      "       awning-tricycle       1610        599      0.325      0.162      0.128     0.0632\n",
      "Speed: 0.1ms pre-process, 15.3ms inference, 1.9ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp296\u001b[0m\n",
      "Vis\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/openimages.yaml, weights=['runs/train/vis_k_v_o_replay_DER/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 7330b3e4 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 814 layers, 28701730 parameters, 0 gradients, 116.2 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/test.cache... 600 ima\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        600       1621      0.623      0.405      0.444      0.314\n",
      "                   car        600        113      0.549      0.513      0.508      0.378\n",
      "                   van        600          6          0          0      0.015     0.0106\n",
      "                 truck        600         17      0.444      0.294      0.323      0.301\n",
      "                person        600       1131      0.519      0.287       0.32      0.163\n",
      "               bicycle        600         43      0.734      0.386      0.556      0.341\n",
      "                  bird        600         61      0.692       0.59       0.61      0.409\n",
      "                  boat        600         82      0.794      0.423      0.518      0.303\n",
      "                bottle        600          1          0          0     0.0498     0.0448\n",
      "                   bus        600          3      0.605          1      0.995      0.819\n",
      "                   cat        600          5      0.732      0.557      0.702      0.377\n",
      "                 chair        600         12      0.528      0.376      0.431      0.286\n",
      "                   dog        600         25      0.651       0.88      0.675      0.517\n",
      "                 horse        600         37      0.626      0.724      0.648      0.404\n",
      "                 sheep        600          8      0.635      0.625      0.592      0.474\n",
      "                 train        600          2          1          0    0.00927    0.00742\n",
      "             billboard        600          3          1          0          0          0\n",
      "                rabbit        600          1      0.521          1      0.995      0.895\n",
      "                monkey        600         16      0.843      0.688      0.815      0.489\n",
      "                   pig        600          7      0.834      0.571      0.819      0.571\n",
      "                   toy        600         42          0          0     0.0482      0.025\n",
      "         traffic light        600          5          1          0          0          0\n",
      "          traffic sign        600          1          1          0      0.142     0.0995\n",
      "Speed: 0.1ms pre-process, 6.3ms inference, 0.9ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp297\u001b[0m\n",
      "openimages\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_VOC.yaml, weights=['runs/train/vis_k_v_o_replay_DER/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 7330b3e4 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 814 layers, 28701730 parameters, 0 gradients, 116.2 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 image\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.778      0.662      0.723      0.473\n",
      "                   car       4952       1201      0.804      0.828      0.868      0.656\n",
      "                person       4952       4528      0.791       0.75      0.811      0.506\n",
      "             aeroplane       4952        285      0.895      0.597       0.73      0.456\n",
      "               bicycle       4952        337      0.841      0.757      0.842      0.561\n",
      "                  bird       4952        459      0.818      0.645      0.717      0.435\n",
      "                  boat       4952        263       0.64      0.597      0.614       0.32\n",
      "                bottle       4952        469        0.8      0.499      0.575      0.344\n",
      "                   bus       4952        213      0.804      0.787      0.834      0.673\n",
      "                   cat       4952        358       0.82      0.768      0.797      0.544\n",
      "                 chair       4952        756      0.707      0.456      0.534      0.315\n",
      "                   cow       4952        244      0.798      0.681      0.781      0.538\n",
      "           diningtable       4952        206      0.677      0.519      0.611      0.389\n",
      "                   dog       4952        489      0.785      0.685      0.748      0.486\n",
      "                 horse       4952        348      0.867      0.807       0.86      0.614\n",
      "             motorbike       4952        325      0.827      0.698      0.776      0.497\n",
      "           pottedplant       4952        480      0.676      0.427      0.473      0.211\n",
      "                 sheep       4952        242      0.661      0.723        0.7      0.483\n",
      "                  sofa       4952        239      0.701       0.64      0.689      0.476\n",
      "                 train       4952        282      0.882      0.752      0.811      0.519\n",
      "             tvmonitor       4952        308      0.769      0.633      0.682      0.444\n",
      "Speed: 0.1ms pre-process, 4.8ms inference, 0.7ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp298\u001b[0m\n",
      "Voc\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_kitti.yaml, weights=['runs/train/vis_k_v_o_replay_DER/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 7330b3e4 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 814 layers, 28701730 parameters, 0 gradients, 116.2 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.802      0.662      0.731      0.435\n",
      "                   car       2244       8711      0.893      0.837      0.911      0.636\n",
      "                   van       2244        861      0.815      0.648      0.753      0.496\n",
      "                 truck       2244        333      0.916      0.849      0.916      0.645\n",
      "                  tram       2244        138      0.848      0.812      0.877      0.529\n",
      "                person       2244       1286      0.844      0.575      0.679      0.324\n",
      "        person_sitting       2244         89      0.562      0.427       0.42      0.188\n",
      "               cyclist       2244        496      0.795      0.633      0.709      0.354\n",
      "                  misc       2244        284      0.741      0.515      0.586       0.31\n",
      "Speed: 0.0ms pre-process, 2.6ms inference, 0.7ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp299\u001b[0m\n",
      "kitti\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the current checkpoint on every benchmark test set.\n",
    "# The four hand-copied val.py invocations are folded into one loop over\n",
    "# (data yaml, tag) pairs; each run echoes its tag afterwards (the `&&`\n",
    "# only fires on success) so the blocks are easy to tell apart in the log.\n",
    "model = 'runs/train/vis_k_v_o_replay_DER/weights/last.pt'\n",
    "\n",
    "# Order matters only for log readability: VisDrone, OpenImages, VOC, KITTI.\n",
    "eval_sets = [\n",
    "    ('data/val_VisDrone_incremental.yaml', 'Vis'),\n",
    "    ('data/openimages.yaml', 'openimages'),\n",
    "    ('data/val_VOC.yaml', 'Voc'),\n",
    "    ('data/val_kitti.yaml', 'kitti'),\n",
    "]\n",
    "\n",
    "for data_yaml, tag in eval_sets:\n",
    "    # IPython's ! magic interpolates {val_command} from the local namespace.\n",
    "    val_command = (\n",
    "        f\"python val.py --data {data_yaml} --weights {model} \"\n",
    "        f\"--task test && echo '{tag}'\"\n",
    "    )\n",
    "    !{val_command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0623c5a6-285b-4e67-827b-83718c006798",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "aa210ccf-a353-4477-a9a7-79bb83fc5abd",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ba7c2688-cdce-487e-ae81-f77dc98eed76",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "304a24da-ceb7-4a9c-82ff-aa1657ea05e0",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_DER: \u001b[0mweights=./runs/train/k_v_o_replay_DER/weights/last.pt, cfg=models/yolov5s_openimages.yaml, data=data/kvo_VisDrone_incremental.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=50, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=replay_vis_k_v_o_Lwf, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=True, Lwf_lambda=[0.0001], Lwf_temperature=1.0, Old_models=['./runs/train/k_v_o_replay_DER/weights/last.pt'], DER_enable=False, DER_old_model=[]\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2895 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 b2fc1a21 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/34de7b2535ca4cb98cfc046c94bf9fe5\u001b[0m\n",
      "\n",
      "extractors长度： 0\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1    110583  models.yolo_DerTest.Detect              [36, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7116727 parameters, 7116727 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 351/355 items from runs/train/k_v_o_replay_DER/weights/last.pt\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1    110583  models.yolo_DerTest.Detect              [36, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7116727 parameters, 7116727 gradients, 16.2 GFLOPs\n",
      "\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 66 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/kitti_old... 11172 images, \u001b[0m\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/0000137_02220_d_0000163.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/0000140_00118_d_0000002.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/9999945_00000_d_0000114.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-train/images/9999987_00000_d_0000049.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /root/autodl-tmp/datasets/VOC/labels/kitti_old.cache\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-val/labels.cac\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m3.00 anchors/target, 0.935 Best Possible Recall (BPR). Anchors are a poor fit to dataset ⚠️, attempting to improve...\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mWARNING ⚠️ Extremely small objects found: 29652 of 358162 labels are <3 pixels in size\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mRunning kmeans for 9 anchors on 357265 points...\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mEvolving anchors with Genetic Algorithm: fitness = 0.6874: 100%|████\u001b[0m\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mthr=0.25: 0.9982 best possible recall, 3.53 anchors past thr\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mn=9, img_size=640, metric_all=0.239/0.687-mean/best, past_thr=0.488-mean: 3,6, 8,8, 10,18, 20,11, 33,29, 109,222, 207,141, 203,318, 893,411\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mDone ✅ (optional: update model *.yaml to use these anchors in the future)\n",
      "Plotting labels to runs/train/replay_vis_k_v_o_Lwf/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/replay_vis_k_v_o_Lwf\u001b[0m\n",
      "Starting training for 50 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/49      3.51G    0.09686    0.08944    0.06562        226        640: 1\n",
      "tensor([1.24702], device='cuda:0', grad_fn=<AddBackward0>) tensor(2830.58765, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759     0.0412       0.16     0.0639     0.0272\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/49      3.71G    0.07791    0.08711    0.05159         99        640: 1\n",
      "tensor([0.88510], device='cuda:0', grad_fn=<AddBackward0>) tensor(2034.89087, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.291      0.175        0.1      0.046\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/49      3.71G    0.07468    0.08678    0.04526        240        640: 1\n",
      "tensor([0.98634], device='cuda:0', grad_fn=<AddBackward0>) tensor(2033.81372, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.377      0.189      0.122     0.0583\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/49      3.71G    0.07169    0.08715    0.04058        210        640: 1\n",
      "tensor([1.08541], device='cuda:0', grad_fn=<AddBackward0>) tensor(2267.22803, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.453      0.202      0.145     0.0704\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/49      3.71G    0.07042    0.08617    0.03776         58        640: 1\n",
      "tensor([0.77117], device='cuda:0', grad_fn=<AddBackward0>) tensor(2241.26904, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.31      0.211      0.165     0.0807\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/49      3.71G    0.06908    0.08534    0.03598        366        640: 1\n",
      "tensor([1.24148], device='cuda:0', grad_fn=<AddBackward0>) tensor(2472.66162, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.468      0.211      0.181     0.0901\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/49      3.71G    0.06834    0.08491    0.03505        182        640: 1\n",
      "tensor([1.01909], device='cuda:0', grad_fn=<AddBackward0>) tensor(2258.68994, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.349      0.222      0.192      0.096\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/49      3.71G    0.06815    0.08466    0.03405        196        640: 1\n",
      "tensor([1.00373], device='cuda:0', grad_fn=<AddBackward0>) tensor(2118.70923, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.475      0.217      0.201      0.103\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/49      3.71G    0.06766    0.08482    0.03326        294        640: 1\n",
      "tensor([1.13320], device='cuda:0', grad_fn=<AddBackward0>) tensor(2240.72095, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.502      0.216      0.211      0.108\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/49      3.71G    0.06726     0.0846    0.03289        401        640: 1\n",
      "tensor([1.18642], device='cuda:0', grad_fn=<AddBackward0>) tensor(2344.26807, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.413      0.228      0.214       0.11\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/49      3.71G    0.06656    0.08284    0.03216        394        640: 1\n",
      "tensor([1.23764], device='cuda:0', grad_fn=<AddBackward0>) tensor(2285.22998, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.431      0.226      0.219      0.113\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/49      3.71G    0.06626    0.08215    0.03179        159        640: 1\n",
      "tensor([0.87905], device='cuda:0', grad_fn=<AddBackward0>) tensor(1956.36450, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.393      0.256      0.228      0.118\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/49      3.71G    0.06591    0.08283    0.03157        274        640: 1\n",
      "tensor([1.26333], device='cuda:0', grad_fn=<AddBackward0>) tensor(2341.29785, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.347      0.257      0.232      0.121\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/49      3.71G    0.06573    0.08213    0.03101        103        640: 1\n",
      "tensor([0.84195], device='cuda:0', grad_fn=<AddBackward0>) tensor(2002.20691, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.365      0.256      0.235      0.123\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/49      3.71G    0.06536    0.08259    0.03063        102        640: 1\n",
      "tensor([0.73202], device='cuda:0', grad_fn=<AddBackward0>) tensor(1816.91187, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.338      0.272      0.247       0.13\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/49      3.71G    0.06493    0.08202     0.0304        145        640: 1\n",
      "tensor([0.83255], device='cuda:0', grad_fn=<AddBackward0>) tensor(2142.57397, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.329      0.266      0.241      0.126\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/49      3.71G    0.06528    0.08223    0.03009        126        640: 1\n",
      "tensor([0.85210], device='cuda:0', grad_fn=<AddBackward0>) tensor(2004.46570, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.338      0.276       0.25      0.133\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/49      3.71G    0.06454     0.0816    0.02989        288        640: 1\n",
      "tensor([0.98188], device='cuda:0', grad_fn=<AddBackward0>) tensor(2111.39844, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.351      0.279      0.252      0.133\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/49      3.71G    0.06439    0.08131    0.02957        188        640: 1\n",
      "tensor([1.00182], device='cuda:0', grad_fn=<AddBackward0>) tensor(2100.59204, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.355      0.279      0.259      0.138\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/49      3.71G    0.06427    0.08088    0.02923        144        640: 1\n",
      "tensor([0.82760], device='cuda:0', grad_fn=<AddBackward0>) tensor(2002.32727, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.367       0.28      0.261      0.141\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/49      3.71G    0.06416    0.08071    0.02911        213        640: 1\n",
      "tensor([0.96382], device='cuda:0', grad_fn=<AddBackward0>) tensor(2084.33057, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.367      0.279      0.262       0.14\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/49      3.71G     0.0639    0.08057    0.02891         66        640: 1\n",
      "tensor([0.68109], device='cuda:0', grad_fn=<AddBackward0>) tensor(1631.36963, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.377      0.284      0.268      0.144\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/49      3.71G    0.06377    0.08012    0.02859        206        640: 1\n",
      "tensor([1.12989], device='cuda:0', grad_fn=<AddBackward0>) tensor(2858.02905, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.38      0.285      0.271      0.145\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/49      3.71G    0.06351       0.08    0.02859        245        640: 1\n",
      "tensor([0.97648], device='cuda:0', grad_fn=<AddBackward0>) tensor(2082.08643, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.378      0.295      0.276      0.147\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/49      3.71G    0.06394    0.07976    0.02826        301        640: 1\n",
      "tensor([1.19080], device='cuda:0', grad_fn=<AddBackward0>) tensor(2502.09204, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.375      0.292      0.272      0.145\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/49      3.71G     0.0632    0.07937    0.02794        213        640: 1\n",
      "tensor([0.96865], device='cuda:0', grad_fn=<AddBackward0>) tensor(2102.04980, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.376      0.294      0.281       0.15\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/49      3.71G    0.06297    0.07829    0.02785        255        640: 1\n",
      "tensor([1.01163], device='cuda:0', grad_fn=<AddBackward0>) tensor(2271.35840, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.391      0.294      0.281       0.15\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/49      3.71G    0.06325    0.08024    0.02777        129        640: 1\n",
      "tensor([0.87194], device='cuda:0', grad_fn=<AddBackward0>) tensor(1777.79858, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.391      0.296      0.282      0.152\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/49      3.71G    0.06255    0.07893    0.02748        142        640: 1\n",
      "tensor([0.72250], device='cuda:0', grad_fn=<AddBackward0>) tensor(2058.27661, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.405        0.3      0.287      0.155\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/49      3.71G    0.06282    0.07961    0.02737         96        640: 1\n",
      "tensor([0.77188], device='cuda:0', grad_fn=<AddBackward0>) tensor(1767.33252, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.398      0.304      0.288      0.155\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/49      3.71G    0.06276    0.07943    0.02732        183        640: 1\n",
      "tensor([0.94263], device='cuda:0', grad_fn=<AddBackward0>) tensor(2074.49536, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.403      0.302      0.289      0.156\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/49      3.71G    0.06222    0.07882    0.02707        165        640: 1\n",
      "tensor([0.82968], device='cuda:0', grad_fn=<AddBackward0>) tensor(1908.54663, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.401      0.303       0.29      0.158\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/49      3.71G    0.06224     0.0796    0.02681         92        640: 1\n",
      "tensor([0.72662], device='cuda:0', grad_fn=<AddBackward0>) tensor(1779.13623, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.401      0.305      0.291      0.157\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/49      3.71G    0.06229     0.0789    0.02676        221        640: 1\n",
      "tensor([0.99752], device='cuda:0', grad_fn=<AddBackward0>) tensor(2086.74268, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.402      0.303       0.29      0.158\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/49      3.71G    0.06184    0.07823    0.02651        343        640: 1\n",
      "tensor([0.92229], device='cuda:0', grad_fn=<AddBackward0>) tensor(2035.06177, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.398      0.309      0.294       0.16\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/49      3.71G    0.06159    0.07841    0.02635        415        640: 1\n",
      "tensor([1.19678], device='cuda:0', grad_fn=<AddBackward0>) tensor(2311.52954, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.409      0.305      0.295       0.16\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/49      3.71G    0.06153    0.07806    0.02634         80        640: 1\n",
      "tensor([0.71865], device='cuda:0', grad_fn=<AddBackward0>) tensor(1893.54688, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.402       0.31      0.294       0.16\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/49      3.71G     0.0612    0.07785    0.02599        170        640: 1\n",
      "tensor([0.83167], device='cuda:0', grad_fn=<AddBackward0>) tensor(1954.69507, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.409      0.309      0.298      0.163\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/49      3.71G    0.06139    0.07896    0.02603        404        640: 1\n",
      "tensor([0.98469], device='cuda:0', grad_fn=<AddBackward0>) tensor(2048.81250, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.411      0.313      0.299      0.163\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/49      3.71G     0.0613     0.0773    0.02592        275        640: 1\n",
      "tensor([1.07304], device='cuda:0', grad_fn=<AddBackward0>) tensor(2166.78760, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.409       0.31      0.298      0.162\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/49      3.71G    0.06132    0.07764    0.02581        219        640: 1\n",
      "tensor([0.92522], device='cuda:0', grad_fn=<AddBackward0>) tensor(2051.17114, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.412       0.31      0.301      0.165\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/49      3.71G    0.06087    0.07756    0.02564        179        640: 1\n",
      "tensor([0.85806], device='cuda:0', grad_fn=<AddBackward0>) tensor(2114.93628, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759       0.41      0.311        0.3      0.164\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/49      3.71G    0.06068    0.07632    0.02541         44        640: 1\n",
      "tensor([0.56437], device='cuda:0', grad_fn=<AddBackward0>) tensor(1431.92407, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.414      0.309      0.299      0.163\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/49      3.71G    0.06081    0.07803    0.02538         32        640: 1\n",
      "tensor([0.58124], device='cuda:0', grad_fn=<AddBackward0>) tensor(1593.45435, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.408      0.315      0.299      0.164\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/49      3.71G     0.0605     0.0772    0.02524        240        640: 1\n",
      "tensor([0.97151], device='cuda:0', grad_fn=<AddBackward0>) tensor(2109.03589, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.411      0.316      0.301      0.165\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/49      3.71G    0.06031    0.07639    0.02497        229        640: 1\n",
      "tensor([0.90542], device='cuda:0', grad_fn=<AddBackward0>) tensor(2016.64404, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.413      0.316      0.302      0.165\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/49      3.71G    0.06022    0.07683      0.025        617        640: 1\n",
      "tensor([1.43138], device='cuda:0', grad_fn=<AddBackward0>) tensor(2728.67383, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.413      0.317      0.302      0.165\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/49      3.71G    0.05994    0.07582    0.02492         53        640: 1\n",
      "tensor([0.54467], device='cuda:0', grad_fn=<AddBackward0>) tensor(1790.71082, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.412      0.318      0.302      0.166\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/49      3.71G    0.05993    0.07545     0.0248        157        640: 1\n",
      "tensor([0.80153], device='cuda:0', grad_fn=<AddBackward0>) tensor(2197.25098, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.409       0.32      0.302      0.166\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/49      3.71G     0.0596     0.0756    0.02449         38        640: 1\n",
      "tensor([0.58721], device='cuda:0', grad_fn=<AddBackward0>) tensor(1449.49658, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.412      0.318      0.304      0.167\n",
      "\n",
      "50 epochs completed in 1.234 hours.\n",
      "Optimizer stripped from runs/train/replay_vis_k_v_o_Lwf/weights/last.pt, 14.5MB\n",
      "Optimizer stripped from runs/train/replay_vis_k_v_o_Lwf/weights/best.pt, 14.5MB\n",
      "\n",
      "Validating runs/train/replay_vis_k_v_o_Lwf/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        548      38759      0.403        0.3      0.287       0.16\n",
      "                   car        548      14064      0.614      0.703      0.713      0.473\n",
      "                   van        548       1975       0.38      0.338      0.307      0.206\n",
      "                 truck        548        750      0.404      0.293      0.286      0.181\n",
      "                person        548       5125      0.393       0.26      0.229     0.0744\n",
      "               bicycle        548       1287      0.174      0.124     0.0742     0.0257\n",
      "                   bus        548        251      0.531      0.382      0.385      0.257\n",
      "             motorbike        548       4886      0.433      0.341        0.3      0.109\n",
      "            pedestrian        548       8844      0.457      0.341       0.35      0.146\n",
      "              tricycle        548       1045      0.367      0.119      0.127     0.0635\n",
      "       awning-tricycle        548        532      0.276     0.0959     0.0975     0.0604\n",
      "Results saved to \u001b[1mruns/train/replay_vis_k_v_o_Lwf\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : replay_vis_k_v_o_Lwf\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/34de7b2535ca4cb98cfc046c94bf9fe5\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_f1              : 0.14230066513318645\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_false_positives : 134.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_mAP@.5          : 0.09753827614412756\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_mAP@.5:.95      : 0.0603987352307361\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_precision       : 0.2759857463221153\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_recall          : 0.09586466165413533\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_support         : 532\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     awning-tricycle_true_positives  : 51.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_f1                      : 0.14437858099538664\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_false_positives         : 757.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5                  : 0.07423801734693168\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5:.95              : 0.025696531719524997\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_precision               : 0.1736674734008115\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_recall                  : 0.12354312354312354\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_support                 : 1287\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_true_positives          : 159.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_f1                          : 0.4445387017814347\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_false_positives             : 85.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5                      : 0.38518022104067196\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5:.95                  : 0.2567388839084442\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_precision                   : 0.5306552891574376\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_recall                      : 0.38247011952191234\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_support                     : 251\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_true_positives              : 96.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_f1                          : 0.6553526350213477\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_false_positives             : 6226.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5                      : 0.7127472829406691\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5:.95                  : 0.47276912167836216\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_precision                   : 0.6136453213018422\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_recall                      : 0.7031427758816837\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_support                     : 14064\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_true_positives              : 9889.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [3495]                     : (0.7225030064582825, 6.598398208618164)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [100]           : (0.06385510817393544, 0.30387842082112837)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [100]      : (0.02724532963000504, 0.16673544322096742)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [100]         : (0.04118739742068587, 0.5020925800882141)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [100]            : (0.1599380448768352, 0.31959492623296604)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_f1                    : 0.381548802985058\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_false_positives       : 2177.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_mAP@.5                : 0.2996108467678702\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_mAP@.5:.95            : 0.10931758083522351\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_precision             : 0.4334147380950477\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_recall                : 0.3407695456406058\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_support               : 4886\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     motorbike_true_positives        : 1665.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_f1                   : 0.39023007522788034\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_false_positives      : 3585.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_mAP@.5               : 0.3500336974329538\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_mAP@.5:.95           : 0.14600196916026792\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_precision            : 0.4566416169875657\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_recall               : 0.3406829488919041\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_support              : 8844\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pedestrian_true_positives       : 3013.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_f1                       : 0.31287935772792297\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_false_positives          : 2053.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5                   : 0.22896293285177727\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5:.95               : 0.07441760139918982\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_precision                : 0.3933523706546289\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_recall                   : 0.25974091164335067\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_support                  : 5125\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_true_positives           : 1331.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [100]            : (0.059598658233881, 0.09686277061700821)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [100]            : (0.024486951529979706, 0.06562415510416031)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [100]            : (0.07545213401317596, 0.0894424319267273)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_f1                     : 0.17929805154537104\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_false_positives        : 214.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_mAP@.5                 : 0.1266471644479937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_mAP@.5:.95             : 0.06351821060453391\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_precision              : 0.36667758156299113\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_recall                 : 0.11866028708133972\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_support                : 1045\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tricycle_true_positives         : 124.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_f1                        : 0.3399359177738531\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_false_positives           : 324.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5                    : 0.28638504780711194\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5:.95                : 0.18070788736484955\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_precision                 : 0.40414312495622084\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_recall                    : 0.29333333333333333\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_support                   : 750\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_true_positives            : 220.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [100]              : (0.058719977736473083, 0.07966304570436478)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [100]              : (0.028890371322631836, 0.05143394693732262)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [100]              : (0.17170625925064087, 0.1854713261127472)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_f1                          : 0.35762067131045533\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_false_positives             : 1089.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5                      : 0.30692999391804465\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5:.95                  : 0.2059877298592164\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_precision                   : 0.37990131744272354\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_recall                      : 0.33780869477072006\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_support                     : 1975\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_true_positives              : 667.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [100]                     : (0.0004960000000000005, 0.07004291845493563)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [100]                     : (0.0004960000000000005, 0.009599420123986648)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [100]                     : (0.0004960000000000005, 0.009599420123986648)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : replay_vis_k_v_o_Lwf\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/yolov5/34de7b2535ca4cb98cfc046c94bf9fe5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     DER_enable          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     DER_old_model       : []\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.225\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/replay_vis_k_v_o_Lwf\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (2.18 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Please wait for metadata to finish uploading (timeout is 3600 seconds)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Uploading 228 metrics, params and output messages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Please wait for assets to finish uploading (timeout is 10800 seconds)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Still uploading 7 file(s), remaining 14.89 KB/2.02 MB\n"
     ]
    }
   ],
   "source": [
    "# Incremental training with LwF (Learning without Forgetting) regularization,\n",
    "# warm-started from the DER replay checkpoint; the same checkpoint is passed as\n",
    "# the frozen teacher via --Old_models.\n",
    "# No placeholders are interpolated, so a plain string (not an f-string) suffices.\n",
    "command = \"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_DER.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_openimages.yaml \\\n",
    "--data data/kvo_VisDrone_incremental.yaml \\\n",
    "--epochs 50 \\\n",
    "--weights ./runs/train/k_v_o_replay_DER/weights/last.pt \\\n",
    "--name replay_vis_k_v_o_Lwf \\\n",
    "--Lwf_enable \\\n",
    "--Lwf_temperature 1.0 \\\n",
    "--Lwf_lambda 1e-4 \\\n",
    "--Old_models \\\n",
    "        ./runs/train/k_v_o_replay_DER/weights/last.pt \\\n",
    "\"\"\"\n",
    "!{command}\n",
    "# ~43 minutes wall-clock for this run"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "2e5b2bc5-17d6-4c01-bc86-da7b4d67b860",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_VisDrone_incremental.yaml, weights=['runs/train/replay_vis_k_v_o_Lwf/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 b2fc1a21 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VisDrone/VisDrone2019-DET-test-dev/labe\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   WARNING ⚠️ NMS time limit 2.100s exceeded\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1610      75102      0.345      0.285      0.246       0.13\n",
      "                   car       1610      28074       0.56       0.68      0.661      0.391\n",
      "                   van       1610       5771       0.27      0.413      0.272      0.164\n",
      "                 truck       1610       2659      0.301      0.373      0.282      0.155\n",
      "                person       1610       6376      0.297      0.127     0.0929     0.0264\n",
      "               bicycle       1610       1302      0.182     0.0685     0.0594     0.0199\n",
      "                   bus       1610       2940      0.605      0.492      0.494      0.309\n",
      "             motorbike       1610       5845      0.354      0.255      0.203     0.0715\n",
      "            pedestrian       1610      21006      0.404      0.219       0.23     0.0872\n",
      "              tricycle       1610        530        0.2      0.128     0.0853     0.0408\n",
      "       awning-tricycle       1610        599      0.281     0.0985     0.0769     0.0382\n",
      "Speed: 0.1ms pre-process, 4.0ms inference, 28.6ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp335\u001b[0m\n",
      "Vis\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/openimages.yaml, weights=['runs/train/replay_vis_k_v_o_Lwf/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 b2fc1a21 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/test.cache... 600 ima\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        600       1621      0.588      0.379      0.424      0.308\n",
      "                   car        600        113       0.58       0.46      0.489      0.361\n",
      "                   van        600          6      0.157      0.167     0.0588     0.0518\n",
      "                 truck        600         17      0.826      0.412      0.589      0.523\n",
      "                person        600       1131      0.558      0.249      0.324      0.167\n",
      "               bicycle        600         43      0.666      0.326        0.4      0.245\n",
      "                  bird        600         61      0.884      0.498      0.588      0.385\n",
      "                  boat        600         82        0.7      0.314       0.41      0.235\n",
      "                bottle        600          1          0          0     0.0276     0.0111\n",
      "                   bus        600          3      0.348      0.718      0.806      0.665\n",
      "                   cat        600          5      0.559        0.6      0.645      0.337\n",
      "                 chair        600         12      0.436      0.417      0.363      0.212\n",
      "                   dog        600         25      0.617      0.775      0.707      0.532\n",
      "                 horse        600         37      0.578      0.703      0.646      0.365\n",
      "                 sheep        600          8      0.648      0.625      0.604      0.515\n",
      "                 train        600          2          0          0      0.013     0.0103\n",
      "             billboard        600          3          1          0          0          0\n",
      "                rabbit        600          1      0.433          1      0.995      0.995\n",
      "                monkey        600         16      0.888      0.495      0.833      0.572\n",
      "                   pig        600          7      0.767      0.571      0.763      0.562\n",
      "                   toy        600         42      0.287     0.0205     0.0584     0.0327\n",
      "         traffic light        600          5          1          0   0.000901   0.000721\n",
      "          traffic sign        600          1          1          0          0          0\n",
      "Speed: 0.1ms pre-process, 2.6ms inference, 1.4ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp336\u001b[0m\n",
      "openimages\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_VOC.yaml, weights=['runs/train/replay_vis_k_v_o_Lwf/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 b2fc1a21 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 image\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.779      0.626      0.667      0.432\n",
      "                   car       4952       1201      0.843      0.817       0.86      0.647\n",
      "                person       4952       4528      0.819      0.732      0.802       0.49\n",
      "             aeroplane       4952        285      0.909       0.46       0.59      0.369\n",
      "               bicycle       4952        337      0.841      0.733      0.776      0.506\n",
      "                  bird       4952        459      0.801      0.586      0.613      0.365\n",
      "                  boat       4952        263      0.676      0.477      0.494       0.27\n",
      "                bottle       4952        469      0.842      0.352      0.456      0.275\n",
      "                   bus       4952        213      0.831      0.779        0.8      0.638\n",
      "                   cat       4952        358      0.825      0.782      0.786      0.504\n",
      "                 chair       4952        756      0.684      0.418      0.478      0.283\n",
      "                   cow       4952        244      0.758      0.648        0.7      0.487\n",
      "           diningtable       4952        206       0.71      0.592      0.622      0.375\n",
      "                   dog       4952        489      0.773      0.658      0.709       0.44\n",
      "                 horse       4952        348      0.848      0.772      0.815      0.558\n",
      "             motorbike       4952        325      0.814      0.677      0.743      0.465\n",
      "           pottedplant       4952        480      0.688      0.452      0.434      0.201\n",
      "                 sheep       4952        242      0.746       0.62      0.668       0.45\n",
      "                  sofa       4952        239       0.61      0.621      0.637       0.43\n",
      "                 train       4952        282      0.826      0.716      0.731      0.467\n",
      "             tvmonitor       4952        308      0.739      0.626       0.63      0.419\n",
      "Speed: 0.1ms pre-process, 1.4ms inference, 0.8ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp337\u001b[0m\n",
      "Voc\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_kitti.yaml, weights=['runs/train/replay_vis_k_v_o_Lwf/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 b2fc1a21 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.664      0.433      0.485      0.294\n",
      "                   car       2244       8711      0.778      0.864      0.889      0.609\n",
      "                   van       2244        861      0.635      0.475      0.531      0.345\n",
      "                 truck       2244        333      0.717      0.714      0.757      0.533\n",
      "                  tram       2244        138      0.763      0.399      0.534      0.296\n",
      "                person       2244       1286      0.708      0.547      0.595      0.278\n",
      "        person_sitting       2244         89          1          0     0.0126    0.00471\n",
      "               cyclist       2244        496      0.633      0.458      0.497      0.248\n",
      "                  misc       2244        284     0.0778    0.00704     0.0648     0.0357\n",
      "Speed: 0.0ms pre-process, 1.0ms inference, 1.0ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp338\u001b[0m\n",
      "kitti\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the LwF checkpoint on every dataset seen so far.\n",
    "# (Lwf_lambda values tried: 1e-4, 1e-3.)\n",
    "model = 'runs/train/replay_vis_k_v_o_Lwf/weights/last.pt'\n",
    "\n",
    "# (data yaml, label echoed after a successful run) — one entry per dataset.\n",
    "eval_targets = [\n",
    "    ('data/val_VisDrone_incremental.yaml', 'Vis'),\n",
    "    ('data/openimages.yaml', 'openimages'),\n",
    "    ('data/val_VOC.yaml', 'Voc'),\n",
    "    ('data/val_kitti.yaml', 'kitti'),\n",
    "]\n",
    "\n",
    "# The four original invocations differed only in --data and the echoed label,\n",
    "# so run them from a single loop; `&&` keeps the label tied to a successful run.\n",
    "for data_yaml, label in eval_targets:\n",
    "    val_command = (\n",
    "        f\"python val.py \"\n",
    "        f\"--data {data_yaml} \"\n",
    "        f\"--weights {model} \"\n",
    "        f\"--task test && \"\n",
    "        f\"echo '{label}'\"\n",
    "    )\n",
    "    !{val_command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ca2c2e08-013d-4965-849e-cac9e304d420",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
