{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "9edf42d0-53b9-4b4c-af0e-16410fa7096d",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "YOLOv5 🚀 b2fc1a21 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Setup complete ✅ (112 CPUs, 503.5 GB RAM, 28.3/30.0 GB disk)\n"
     ]
    }
   ],
   "source": [
    "import comet_ml\n",
    "import torch\n",
    "import utils\n",
    "\n",
    "comet_ml.init(project_name='exp_100epoch')\n",
    "# 这里应该会包含100epoch的0,0.6,1.2加雾以及各个以100epoch为单位的增量\n",
    "display = utils.notebook_init()  # checks"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "2af189af-0738-4dc2-b8ef-ee93e25688e1",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_Lwf: \u001b[0mweights=yolov5s.pt, cfg=models/yolov5s_openimages.yaml, data=data/openimages.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=100, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=openimage_base, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=False, Lwf_lambda=0.0001, Lwf_temperature=1.0\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2895 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/exp-100epoch/32fc6a6d2b7e4b5baeaf96930566b3cf\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1    110583  models.yolo.Detect                      [36, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7116727 parameters, 7116727 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/355 items from yolov5s.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 66 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/train.cache... 4200 \u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/val.cache... 1200 imag\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.02 anchors/target, 0.998 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/openimage_base/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/openimage_base\u001b[0m\n",
      "Starting training for 100 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/99      3.65G     0.0815     0.0387    0.05297         40        640: 1\n",
      "tensor([0.95394], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.789     0.0862      0.089     0.0472\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/99      5.75G    0.06224    0.03498    0.02661         63        640: 1\n",
      "tensor([1.19725], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.701      0.212      0.198      0.111\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/99      5.75G    0.06046    0.03469    0.02224         57        640: 1\n",
      "tensor([0.94385], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.586      0.176      0.172     0.0854\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/99      5.75G    0.05778    0.03625    0.02024         42        640: 1\n",
      "tensor([0.82159], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.577      0.264      0.223      0.125\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/99      5.75G    0.05608    0.03596    0.01914         36        640: 1\n",
      "tensor([0.87945], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.436      0.286      0.231      0.111\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/99      5.75G     0.0553    0.03706    0.01775         39        640: 1\n",
      "tensor([0.80984], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.386      0.294      0.224       0.12\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/99      5.75G    0.05407    0.03671    0.01634         68        640: 1\n",
      "tensor([1.15840], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.623      0.256      0.233      0.133\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/99      5.75G    0.05371    0.03641    0.01666         31        640: 1\n",
      "tensor([0.69075], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.547      0.295      0.277      0.149\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/99      5.75G    0.05313    0.03629     0.0161         35        640: 1\n",
      "tensor([0.78013], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.471      0.309      0.251      0.138\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/99      5.75G    0.05209    0.03623    0.01475         42        640: 1\n",
      "tensor([0.88560], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.462      0.288      0.242      0.136\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/99      5.75G    0.05142    0.03547    0.01411         38        640: 1\n",
      "tensor([0.78798], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.367      0.344      0.291      0.166\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/99      5.75G     0.0509    0.03579    0.01318         59        640: 1\n",
      "tensor([0.96003], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.385      0.322       0.27      0.155\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/99      5.75G    0.05025    0.03538    0.01361         46        640: 1\n",
      "tensor([0.80785], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.497      0.318      0.289      0.173\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/99      5.75G    0.05011    0.03486    0.01254         47        640: 1\n",
      "tensor([0.85835], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.416       0.35      0.304      0.177\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/99      5.75G    0.04934    0.03435    0.01242         32        640: 1\n",
      "tensor([0.69843], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.498      0.351      0.322      0.184\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/99      5.75G    0.04863     0.0348    0.01132         48        640: 1\n",
      "tensor([0.85302], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.413      0.369      0.313      0.177\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/99      5.75G    0.04871    0.03401    0.01129         43        640: 1\n",
      "tensor([0.79997], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.449      0.307      0.284      0.164\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/99      5.75G     0.0482    0.03445    0.01109         64        640: 1\n",
      "tensor([0.87198], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.355      0.384      0.329      0.191\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/99      5.75G    0.04778    0.03423    0.01113         61        640: 1\n",
      "tensor([0.78925], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.459      0.377      0.333      0.194\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/99      5.75G    0.04723    0.03438    0.01064         29        640: 1\n",
      "tensor([0.68776], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.413      0.376      0.325      0.199\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/99      5.75G    0.04661     0.0328    0.01001         39        640: 1\n",
      "tensor([0.79357], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.508      0.354      0.344      0.206\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/99      5.75G    0.04587      0.033   0.009932         40        640: 1\n",
      "tensor([0.68437], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.421      0.354      0.325      0.191\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/99      5.75G    0.04569    0.03284   0.009672         44        640: 1\n",
      "tensor([0.79967], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.485      0.366      0.353      0.203\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/99      5.75G    0.04546    0.03313   0.009722         31        640: 1\n",
      "tensor([0.55890], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.565      0.379       0.37      0.216\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/99      5.75G    0.04519    0.03261   0.009718         72        640: 1\n",
      "tensor([0.75503], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.385      0.403      0.355      0.212\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/99      5.75G    0.04484    0.03316   0.009948         41        640: 1\n",
      "tensor([0.64564], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.441      0.395      0.345      0.204\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/99      5.75G    0.04478    0.03255   0.009182         33        640: 1\n",
      "tensor([0.78027], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.405       0.43      0.368      0.233\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/99      5.75G    0.04467    0.03194   0.008745         30        640: 1\n",
      "tensor([0.63945], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.521      0.336       0.36      0.211\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/99      5.75G    0.04435     0.0318   0.008639         30        640: 1\n",
      "tensor([0.62149], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.512      0.391      0.377      0.231\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/99      5.75G    0.04363    0.03161    0.00838         43        640: 1\n",
      "tensor([0.74505], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.52      0.362      0.372       0.23\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/99      5.75G    0.04373    0.03152   0.008383         62        640: 1\n",
      "tensor([0.85401], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.586      0.375       0.36      0.216\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/99      5.75G     0.0435     0.0312   0.008584         40        640: 1\n",
      "tensor([0.61692], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.513      0.375      0.372      0.231\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/99      5.75G    0.04284    0.03097    0.00783         37        640: 1\n",
      "tensor([0.55522], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233        0.5      0.364      0.381      0.241\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/99      5.75G    0.04223    0.03054   0.008161         45        640: 1\n",
      "tensor([0.75585], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.457      0.373      0.369      0.234\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/99      5.75G    0.04264    0.03118   0.007616         70        640: 1\n",
      "tensor([0.70289], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.502      0.431      0.398      0.253\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/99      5.75G    0.04217    0.03046   0.007622         34        640: 1\n",
      "tensor([0.58293], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.551       0.38      0.384      0.238\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/99      5.75G     0.0416    0.03015   0.007724         52        640: 1\n",
      "tensor([0.67824], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.529      0.381      0.374      0.229\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/99      5.75G    0.04141    0.03031   0.007425         46        640: 1\n",
      "tensor([0.66315], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.534      0.432      0.414      0.256\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/99      5.75G    0.04071    0.02962   0.007331         42        640: 1\n",
      "tensor([0.66077], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.562      0.419      0.402      0.248\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/99      5.75G    0.04143    0.02981   0.007269         56        640: 1\n",
      "tensor([0.77148], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.547      0.417      0.397      0.245\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/99      5.75G    0.04112    0.02958   0.007253         47        640: 1\n",
      "tensor([0.75042], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.572      0.383      0.389      0.234\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/99      5.75G    0.04069    0.02963   0.006958         30        640: 1\n",
      "tensor([0.51085], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.558      0.399      0.403      0.251\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/99      5.75G     0.0403    0.02925   0.006925         31        640: 1\n",
      "tensor([0.49822], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.555      0.392      0.412      0.262\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/99      5.75G    0.04054    0.02961   0.006682         16        640: 1\n",
      "tensor([0.41258], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.556      0.408      0.406      0.261\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/99      5.75G    0.04001     0.0292   0.006556         39        640: 1\n",
      "tensor([0.64877], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.564      0.399      0.421       0.27\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/99      5.75G    0.03951    0.02898   0.006545         45        640: 1\n",
      "tensor([0.60089], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.601      0.375      0.395      0.264\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/99      5.75G     0.0395    0.02919   0.006795         50        640: 1\n",
      "tensor([0.59722], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.571      0.399       0.39      0.248\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/99      5.75G    0.03955    0.02857   0.006402         28        640: 1\n",
      "tensor([0.57306], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.591       0.36       0.41      0.264\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/99      5.75G    0.03878    0.02878   0.006466         36        640: 1\n",
      "tensor([0.53750], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.482      0.421      0.402      0.249\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/99      5.75G    0.03878    0.02825   0.006221         66        640: 1\n",
      "tensor([0.75872], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.56      0.417      0.415      0.263\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      50/99      5.75G    0.03845    0.02791   0.006166         54        640: 1\n",
      "tensor([0.68862], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.518      0.425      0.406      0.262\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      51/99      5.75G     0.0384    0.02825   0.006133         41        640: 1\n",
      "tensor([0.64870], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.596      0.405      0.411      0.259\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      52/99      5.75G      0.038    0.02776   0.005957         45        640: 1\n",
      "tensor([0.55983], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.543      0.395      0.396      0.252\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      53/99      5.75G    0.03764    0.02777   0.005934         67        640: 1\n",
      "tensor([0.67333], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.615      0.395      0.392      0.251\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      54/99      5.75G    0.03768    0.02783   0.006189         61        640: 1\n",
      "tensor([0.65987], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.661      0.393       0.42      0.277\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      55/99      5.75G    0.03742    0.02689   0.005675         55        640: 1\n",
      "tensor([0.72688], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.58      0.428      0.422      0.278\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      56/99      5.75G    0.03733    0.02704   0.005765         34        640: 1\n",
      "tensor([0.60376], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.662       0.37      0.416      0.268\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      57/99      5.75G    0.03707     0.0272   0.005523         36        640: 1\n",
      "tensor([0.56244], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.467      0.434      0.407      0.271\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      58/99      5.75G    0.03667    0.02722   0.005638         52        640: 1\n",
      "tensor([0.64876], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.601      0.432      0.417      0.279\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      59/99      5.75G    0.03674    0.02644   0.005492         31        640: 1\n",
      "tensor([0.44910], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.552      0.413       0.41      0.271\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      60/99      5.75G    0.03621    0.02648   0.005367         49        640: 1\n",
      "tensor([0.61403], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.572      0.402      0.413      0.278\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      61/99      5.75G    0.03635    0.02621   0.005459         52        640: 1\n",
      "tensor([0.64438], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.599      0.399       0.42      0.285\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      62/99      5.75G    0.03633    0.02649   0.005435         41        640: 1\n",
      "tensor([0.63935], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.593      0.395      0.392      0.258\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      63/99      5.75G    0.03596    0.02586   0.005345         34        640: 1\n",
      "tensor([0.48001], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.575      0.395      0.414      0.279\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      64/99      5.75G    0.03554    0.02546   0.005251         37        640: 1\n",
      "tensor([0.56643], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.536      0.412      0.406      0.264\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      65/99      5.75G    0.03567     0.0258   0.005055         33        640: 1\n",
      "tensor([0.48297], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.562      0.406      0.417      0.286\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      66/99      5.75G    0.03535    0.02559    0.00506         20        640: 1\n",
      "tensor([0.55434], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.586      0.391      0.401      0.267\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      67/99      5.75G    0.03508     0.0259   0.005212         49        640: 1\n",
      "tensor([0.65387], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.589      0.385      0.412      0.277\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      68/99      5.75G    0.03456    0.02546   0.004834         93        640: 1\n",
      "tensor([0.79789], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.557      0.402      0.415      0.284\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      69/99      5.75G    0.03469    0.02527   0.004804         32        640: 1\n",
      "tensor([0.43534], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.591      0.383      0.408      0.279\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      70/99      5.75G    0.03414    0.02498   0.004987         47        640: 1\n",
      "tensor([0.58613], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.591      0.397      0.407      0.274\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      71/99      5.75G    0.03446    0.02546   0.004795         43        640: 1\n",
      "tensor([0.46756], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.604      0.402      0.408      0.277\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      72/99      5.75G    0.03413    0.02463   0.004544         37        640: 1\n",
      "tensor([0.49335], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.676      0.394       0.42      0.292\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      73/99      5.75G    0.03405    0.02486   0.004663         53        640: 1\n",
      "tensor([0.60801], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.54      0.413      0.411      0.282\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      74/99      5.75G    0.03378    0.02494   0.004533         59        640: 1\n",
      "tensor([0.56473], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.582      0.398      0.409      0.281\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      75/99      5.75G    0.03336    0.02416   0.004583         26        640: 1\n",
      "tensor([0.35272], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.68      0.394      0.417      0.286\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      76/99      5.75G    0.03363    0.02437   0.004449         54        640: 1\n",
      "tensor([0.58489], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.582      0.407      0.409      0.283\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      77/99      5.75G    0.03298    0.02393   0.004338         31        640: 1\n",
      "tensor([0.48911], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.551      0.417      0.413      0.286\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      78/99      5.75G    0.03318    0.02409   0.004299         56        640: 1\n",
      "tensor([0.53642], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.546      0.399      0.407      0.281\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      79/99      5.75G    0.03261    0.02367   0.004408         45        640: 1\n",
      "tensor([0.48115], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.679      0.375      0.405      0.278\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      80/99      5.75G    0.03227     0.0237    0.00428         69        640:  "
     ]
    }
   ],
   "source": [
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_Lwf.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_openimages.yaml \\\n",
    "--data data/openimages.yaml \\\n",
    "--epochs 100 \\\n",
    "--weights yolov5s.pt \\\n",
    "--name openimage_base \\\n",
    "\"\"\"\n",
    "!{command}\n",
    "#43分钟"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "7f84e43b-31cd-40db-a804-0ba2b5e6af0e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/openimages.yaml, weights=['runs/train/openimage_base/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/test.cache... 600 ima\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        600       1621      0.686      0.425      0.423      0.291\n",
      "                   car        600        113      0.608       0.44       0.48      0.327\n",
      "                   van        600          6          1          0     0.0499     0.0423\n",
      "                 truck        600         17      0.916      0.647      0.733      0.597\n",
      "                person        600       1131      0.546      0.278       0.33      0.169\n",
      "               bicycle        600         43      0.757      0.362      0.436      0.297\n",
      "                  bird        600         61      0.714      0.426      0.506      0.357\n",
      "                  boat        600         82      0.806       0.39      0.499      0.299\n",
      "                bottle        600          1          1          0          0          0\n",
      "                   bus        600          3      0.763      0.333      0.338      0.236\n",
      "                   cat        600          5          1          0      0.371      0.256\n",
      "                 chair        600         12      0.436      0.333      0.356      0.228\n",
      "                   dog        600         25      0.729       0.48      0.632      0.407\n",
      "                 horse        600         37      0.879      0.622      0.738       0.44\n",
      "                 sheep        600          8      0.885      0.625       0.63      0.522\n",
      "                 train        600          2          1          0          0          0\n",
      "             billboard        600          3          0          0      0.087     0.0609\n",
      "                rabbit        600          1      0.178          1      0.199      0.179\n",
      "                monkey        600         16      0.803      0.766      0.765      0.473\n",
      "                   pig        600          7      0.963      0.857      0.925      0.693\n",
      "                   toy        600         42          0          0     0.0986     0.0421\n",
      "         traffic light        600          5       0.81        0.8        0.8      0.511\n",
      "          traffic sign        600          1      0.299          1      0.332      0.265\n",
      "Speed: 0.1ms pre-process, 2.7ms inference, 1.1ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp216\u001b[0m\n",
      "openimages\n"
     ]
    }
   ],
   "source": [
    "model = f'runs/train/openimage_base/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/openimages.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'openimages' \\\n",
    "\" \n",
    "!{val_command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4bfd4ddc-83aa-41b1-88ff-0175260d1704",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "11f90d64-128e-4f0e-9070-19e1958a8625",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "75e92889-4e35-4592-8903-32a1c8782619",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6316a485-b418-4095-9596-f9d722e5a110",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "f29c2db6-e796-4225-adfe-4600c3059753",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_Lwf: \u001b[0mweights=./runs/train/increment_VOC_plain/weights/last.pt, cfg=models/yolov5s_openimages.yaml, data=data/openimages.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=80, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=k_v_2_openimages, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=False, Lwf_lambda=0.0001, Lwf_temperature=1.0\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2895 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/exp-100epoch/38bf277a51dc46498233dcfa393473b5\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1    110583  models.yolo.Detect                      [36, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7116727 parameters, 7116727 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/355 items from runs/train/increment_VOC_plain/weights/last.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 66 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/train.cache... 4200 \u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/val.cache... 1200 imag\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.02 anchors/target, 0.998 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/k_v_2_openimages/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/k_v_2_openimages\u001b[0m\n",
      "Starting training for 80 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/79      3.65G    0.08476    0.04259    0.06219         40        640: 1\n",
      "tensor([1.11867], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.91     0.0259     0.0456      0.022\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/79      5.75G    0.06171    0.03809    0.03254         63        640: 1\n",
      "tensor([1.22331], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.744      0.169      0.157     0.0878\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/79      5.75G    0.05842    0.03627     0.0249         57        640: 1\n",
      "tensor([0.91513], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.75        0.2      0.222      0.124\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/79      5.75G    0.05482    0.03638    0.02059         42        640: 1\n",
      "tensor([0.74061], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.554      0.302      0.265      0.159\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/79      5.75G    0.05232     0.0353    0.01789         36        640: 1\n",
      "tensor([0.87062], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.628      0.298      0.292      0.176\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/79      5.75G    0.05112    0.03605    0.01594         39        640: 1\n",
      "tensor([0.81034], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.647      0.298      0.288      0.173\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/79      5.75G    0.05027    0.03538    0.01451         68        640: 1\n",
      "tensor([1.15720], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.572      0.322      0.285      0.172\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/79      5.75G    0.04973     0.0352    0.01489         31        640: 1\n",
      "tensor([0.61558], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.518      0.332      0.288      0.175\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/79      5.75G    0.04926      0.035    0.01423         35        640: 1\n",
      "tensor([0.75511], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.54      0.336      0.296      0.181\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/79      5.75G     0.0484    0.03479    0.01288         42        640: 1\n",
      "tensor([0.78960], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.603      0.354      0.314      0.194\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/79      5.75G    0.04809    0.03412    0.01255         38        640: 1\n",
      "tensor([0.72804], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.523      0.351      0.295      0.182\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/79      5.75G    0.04743    0.03443    0.01179         59        640: 1\n",
      "tensor([0.89200], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.63      0.329      0.319      0.196\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/79      5.75G    0.04688    0.03398    0.01223         46        640: 1\n",
      "tensor([0.76126], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.481      0.364      0.303      0.187\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/79      5.75G    0.04697     0.0336    0.01122         47        640: 1\n",
      "tensor([0.78319], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.577      0.352      0.325      0.211\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/79      5.75G    0.04618    0.03302    0.01111         32        640: 1\n",
      "tensor([0.60312], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.599      0.378      0.348      0.217\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/79      5.75G     0.0457    0.03348    0.01035         48        640: 1\n",
      "tensor([0.79499], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.56      0.356      0.352       0.23\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/79      5.75G    0.04576    0.03275    0.01043         43        640: 1\n",
      "tensor([0.76473], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.511      0.343      0.335      0.208\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/79      5.75G    0.04542    0.03313   0.009946         64        640: 1\n",
      "tensor([0.80161], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.626      0.344       0.34       0.22\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/79      5.75G    0.04495     0.0329    0.01005         61        640: 1\n",
      "tensor([0.72081], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.672      0.343      0.359      0.229\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/79      5.75G    0.04464    0.03309   0.009552         29        640: 1\n",
      "tensor([0.64184], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.614      0.382      0.324       0.21\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/79      5.75G    0.04412    0.03173   0.009102         39        640: 1\n",
      "tensor([0.79821], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.561      0.374      0.347      0.223\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/79      5.75G    0.04356    0.03201   0.009573         40        640: 1\n",
      "tensor([0.62563], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.576      0.394      0.345      0.223\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/79      5.75G    0.04344    0.03181   0.009237         44        640: 1\n",
      "tensor([0.78053], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.551      0.429      0.374      0.233\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/79      5.75G     0.0431    0.03204   0.008977         31        640: 1\n",
      "tensor([0.57502], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.618      0.393      0.369       0.23\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/79      5.75G    0.04261    0.03136   0.008656         72        640: 1\n",
      "tensor([0.70439], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.577      0.401       0.38      0.245\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/79      5.75G    0.04231    0.03169   0.009085         41        640: 1\n",
      "tensor([0.61319], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.618      0.358      0.376      0.243\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/79      5.75G    0.04285    0.03165   0.008504         33        640: 1\n",
      "tensor([0.73104], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.586      0.389       0.36      0.228\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/79      5.75G    0.04232    0.03106   0.008119         30        640: 1\n",
      "tensor([0.50718], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.519      0.377      0.346      0.221\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/79      5.75G    0.04206    0.03068   0.007949         30        640: 1\n",
      "tensor([0.50999], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.625       0.38      0.382      0.256\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/79      5.75G    0.04121     0.0303   0.007638         43        640: 1\n",
      "tensor([0.67191], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.472      0.407      0.377      0.248\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/79      5.75G    0.04164    0.03038   0.007792         62        640: 1\n",
      "tensor([0.79939], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.501      0.379      0.375      0.242\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/79      5.75G    0.04145    0.03019   0.007888         40        640: 1\n",
      "tensor([0.58299], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.484      0.401      0.383      0.248\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/79      5.75G    0.04106    0.03008   0.007473         37        640: 1\n",
      "tensor([0.55095], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.532      0.397      0.376      0.249\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/79      5.75G    0.04003    0.02957   0.007709         45        640: 1\n",
      "tensor([0.79343], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.589      0.343      0.376      0.243\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/79      5.75G    0.04083    0.03023   0.007157         70        640: 1\n",
      "tensor([0.68436], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.554      0.385      0.389      0.253\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/79      5.75G    0.04027    0.02949   0.007242         34        640: 1\n",
      "tensor([0.58375], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.421      0.404      0.367      0.233\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/79      5.75G    0.03985    0.02928   0.007239         52        640: 1\n",
      "tensor([0.65285], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.582      0.383      0.396      0.262\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/79      5.75G    0.03971    0.02932   0.007167         46        640: 1\n",
      "tensor([0.61807], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.535      0.386      0.398      0.268\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/79      5.75G    0.03885    0.02881   0.006903         42        640: 1\n",
      "tensor([0.64129], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.532      0.388      0.393      0.262\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/79      5.75G     0.0394     0.0288   0.006761         56        640: 1\n",
      "tensor([0.70503], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.497      0.407      0.415      0.279\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/79      5.75G    0.03931    0.02871   0.006857         47        640: 1\n",
      "tensor([0.69108], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.511      0.415      0.408      0.269\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/79      5.75G    0.03885    0.02869    0.00653         30        640: 1\n",
      "tensor([0.45664], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.498      0.404      0.408       0.27\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/79      5.75G    0.03867    0.02832   0.006586         31        640: 1\n",
      "tensor([0.46177], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.548       0.43      0.419      0.279\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/79      5.75G    0.03889    0.02868   0.006246         16        640: 1\n",
      "tensor([0.42677], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.49      0.455      0.403      0.267\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/79      5.75G    0.03833    0.02848   0.006238         39        640: 1\n",
      "tensor([0.59608], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.483      0.449      0.409      0.274\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/79      5.75G    0.03778      0.028   0.005979         45        640: 1\n",
      "tensor([0.52851], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.43      0.414      0.394      0.261\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/79      5.75G    0.03761    0.02812   0.006276         50        640: 1\n",
      "tensor([0.58609], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.483      0.451      0.414      0.276\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/79      5.75G    0.03787    0.02782   0.006017         28        640: 1\n",
      "tensor([0.55644], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.548      0.436      0.415      0.275\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/79      5.75G    0.03701    0.02781    0.00605         36        640: 1\n",
      "tensor([0.58782], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.572       0.41      0.428      0.285\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/79      5.75G    0.03698    0.02722   0.005651         66        640: 1\n",
      "tensor([0.69287], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.525      0.386      0.399      0.267\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      50/79      5.75G    0.03658     0.0273   0.005914         54        640: 1\n",
      "tensor([0.65742], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.578      0.398      0.413      0.279\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      51/79      5.75G    0.03685    0.02739   0.005764         41        640: 1\n",
      "tensor([0.63952], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.547      0.402      0.419      0.281\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      52/79      5.75G    0.03624    0.02682   0.005758         45        640: 1\n",
      "tensor([0.52513], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.56      0.428      0.412      0.271\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      53/79      5.75G    0.03587    0.02669   0.005574         67        640: 1\n",
      "tensor([0.63163], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.516      0.427      0.426      0.281\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      54/79      5.75G    0.03581    0.02686   0.005602         61        640: 1\n",
      "tensor([0.63332], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.642      0.398      0.426      0.286\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      55/79      5.75G     0.0356    0.02603   0.005308         55        640: 1\n",
      "tensor([0.68716], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.552      0.415      0.426      0.276\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      56/79      5.75G     0.0357    0.02618   0.005284         34        640: 1\n",
      "tensor([0.55970], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.609      0.409      0.433      0.286\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      57/79      5.75G    0.03533    0.02622   0.005097         36        640: 1\n",
      "tensor([0.52954], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.562      0.428       0.44      0.292\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      58/79      5.75G    0.03494    0.02628   0.005212         52        640: 1\n",
      "tensor([0.64909], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.501      0.434      0.416      0.281\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      59/79      5.75G    0.03513    0.02565    0.00497         31        640: 1\n",
      "tensor([0.41120], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.545      0.441      0.414       0.28\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      60/79      5.75G    0.03457    0.02569   0.005042         49        640: 1\n",
      "tensor([0.57270], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.515      0.449      0.417      0.281\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      61/79      5.75G    0.03462    0.02538   0.004962         52        640: 1\n",
      "tensor([0.62170], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.561      0.414      0.423      0.287\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      62/79      5.75G    0.03467    0.02576   0.004968         41        640: 1\n",
      "tensor([0.61057], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.506      0.447      0.429      0.293\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      63/79      5.75G    0.03423    0.02508   0.005017         34        640: 1\n",
      "tensor([0.42884], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.597       0.43      0.429       0.29\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      64/79      5.75G    0.03369    0.02478   0.004825         37        640: 1\n",
      "tensor([0.56194], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.54      0.444      0.417      0.281\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      65/79      5.75G    0.03393    0.02498   0.004734         33        640: 1\n",
      "tensor([0.47215], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.536      0.428      0.422      0.284\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      66/79      5.75G    0.03386    0.02488   0.004891         20        640: 1\n",
      "tensor([0.46223], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.485      0.478      0.425      0.284\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      67/79      5.75G    0.03322    0.02512   0.004915         49        640: 1\n",
      "tensor([0.62094], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.521      0.435      0.438      0.299\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      68/79      5.75G    0.03288    0.02458   0.004477         93        640: 1\n",
      "tensor([0.77302], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.532       0.43      0.424      0.291\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      69/79      5.75G    0.03313    0.02473   0.004374         32        640: 1\n",
      "tensor([0.50481], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.592      0.431      0.431      0.294\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      70/79      5.75G    0.03259    0.02444   0.004575         47        640: 1\n",
      "tensor([0.53250], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.492      0.457       0.43      0.291\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      71/79      5.75G     0.0327    0.02477   0.004318         43        640: 1\n",
      "tensor([0.44266], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.523      0.453      0.442        0.3\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      72/79      5.75G     0.0325    0.02403   0.004074         37        640: 1\n",
      "tensor([0.48369], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.572      0.413      0.442      0.301\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      73/79      5.75G    0.03252    0.02408   0.004296         53        640: 1\n",
      "tensor([0.57041], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.556      0.443       0.44      0.306\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      74/79      5.75G    0.03229    0.02427   0.004281         59        640: 1\n",
      "tensor([0.54687], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.56      0.427      0.444      0.307\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      75/79      5.75G     0.0318    0.02367   0.004334         26        640: 1\n",
      "tensor([0.31475], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.558      0.421      0.439      0.297\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      76/79      5.75G    0.03218    0.02382   0.004199         54        640: 1\n",
      "tensor([0.54988], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.519      0.447      0.448       0.31\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      77/79      5.75G    0.03162    0.02342   0.004109         31        640: 1\n",
      "tensor([0.51563], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.505      0.445      0.441      0.304\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      78/79      5.75G    0.03174    0.02367   0.004096         56        640: 1\n",
      "tensor([0.51581], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.551      0.439      0.446      0.306\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      79/79      5.75G    0.03147    0.02334   0.004313         45        640: 1\n",
      "tensor([0.42385], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.529      0.442      0.439        0.3\n",
      "\n",
      "80 epochs completed in 0.703 hours.\n",
      "Optimizer stripped from runs/train/k_v_2_openimages/weights/last.pt, 14.6MB\n",
      "Optimizer stripped from runs/train/k_v_2_openimages/weights/best.pt, 14.6MB\n",
      "\n",
      "Validating runs/train/k_v_2_openimages/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.515      0.446      0.448       0.31\n",
      "                   car       1200        287      0.679      0.575      0.589      0.404\n",
      "                   van       1200         29      0.512      0.723      0.628       0.42\n",
      "                 truck       1200         29        0.4      0.483      0.435      0.324\n",
      "                person       1200       2264      0.459      0.356      0.317      0.167\n",
      "               bicycle       1200         54      0.549      0.407      0.446      0.271\n",
      "                  bird       1200        136      0.603      0.618      0.589      0.367\n",
      "                  boat       1200        145      0.637       0.49      0.505      0.288\n",
      "                bottle       1200         31          0          0    0.00262    0.00154\n",
      "                   bus       1200         15      0.617      0.867      0.833        0.7\n",
      "                   cat       1200          1          1          0     0.0284     0.0171\n",
      "                 chair       1200         21      0.142       0.19     0.0721     0.0266\n",
      "                   dog       1200         42      0.807      0.548       0.62      0.406\n",
      "                 horse       1200         44      0.786      0.584      0.718      0.517\n",
      "                 sheep       1200         10       0.65        0.5       0.55        0.3\n",
      "             billboard       1200          4          0          0      0.017    0.00749\n",
      "                rabbit       1200         11      0.558      0.455      0.567      0.426\n",
      "                monkey       1200         18      0.644      0.944      0.884      0.658\n",
      "                   pig       1200          6      0.598      0.833      0.789      0.693\n",
      "                   toy       1200         64      0.315      0.187       0.12     0.0632\n",
      "         traffic light       1200         18      0.584      0.111      0.328      0.158\n",
      "          traffic sign       1200          4      0.265        0.5      0.373      0.299\n",
      "Results saved to \u001b[1mruns/train/k_v_2_openimages\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : k_v_2_openimages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/exp-100epoch/38bf277a51dc46498233dcfa393473b5\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_f1                    : 0.46767256075653607\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_false_positives       : 18.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5                : 0.4457818366205595\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5:.95            : 0.27098515472851037\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_precision             : 0.5488622121506899\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_recall                : 0.4074074074074074\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_support               : 54\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_true_positives        : 22.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_f1                  : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_false_positives     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_mAP@.5              : 0.016979166666666667\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_mAP@.5:.95          : 0.007493421052631578\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_precision           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_recall              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_support             : 4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_true_positives      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_f1                       : 0.6101246712930328\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_false_positives          : 55.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_mAP@.5                   : 0.5894705838082865\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_mAP@.5:.95               : 0.3674074197498567\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_precision                : 0.6027833108888265\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_recall                   : 0.6176470588235294\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_support                  : 136\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_true_positives           : 84.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_f1                       : 0.5537071490989633\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_false_positives          : 40.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_mAP@.5                   : 0.5045725009850595\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_mAP@.5:.95               : 0.2880901487874339\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_precision                : 0.6370383781884957\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_recall                   : 0.4896551724137931\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_support                  : 145\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_true_positives           : 71.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_f1                     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_false_positives        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_mAP@.5                 : 0.0026200868261109793\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_mAP@.5:.95             : 0.0015378003642603846\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_precision              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_recall                 : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_support                : 31\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_true_positives         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_f1                        : 0.7206568487583308\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_false_positives           : 8.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5                    : 0.833469900240798\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5:.95                : 0.6997795616964735\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_precision                 : 0.616751033955335\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_recall                    : 0.8666666666666667\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_support                   : 15\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_true_positives            : 13.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_f1                        : 0.622536809738557\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_false_positives           : 78.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5                    : 0.5889340414708517\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5:.95                : 0.403572452236301\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_precision                 : 0.6787633634380403\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_recall                    : 0.5749128919860628\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_support                   : 287\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_true_positives            : 165.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_f1                        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_false_positives           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_mAP@.5                    : 0.028428571428571418\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_mAP@.5:.95                : 0.01705714285714285\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_precision                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_recall                    : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_support                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_true_positives            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_f1                      : 0.16297782170750508\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_false_positives         : 24.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_mAP@.5                  : 0.07209900057167468\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_mAP@.5:.95              : 0.02663165203572267\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_precision               : 0.14241751293589613\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_recall                  : 0.19047619047619047\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_support                 : 21\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_true_positives          : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_f1                        : 0.6525521902706396\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_false_positives           : 5.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_mAP@.5                    : 0.6202044441192296\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_mAP@.5:.95                : 0.40569179936153316\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_precision                 : 0.8072315042061301\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_recall                    : 0.5476190476190477\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_support                   : 42\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_true_positives            : 23.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_f1                      : 0.6704036295134719\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_false_positives         : 7.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_mAP@.5                  : 0.7182066418148584\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_mAP@.5:.95              : 0.5172159276051614\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_precision               : 0.7859953886952693\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_recall                  : 0.5844517222798846\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_support                 : 44\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_true_positives          : 26.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [2104]                   : (0.4622308015823364, 3.892483711242676)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [160]         : (0.045635043795171464, 0.4480557155043193)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [160]    : (0.022014817397083396, 0.31040839801113734)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [160]       : (0.4213059968173134, 0.909865909003752)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [160]          : (0.025882471346751652, 0.47816586004428485)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_f1                     : 0.7661460551394624\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_false_positives        : 9.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_mAP@.5                 : 0.8835741845125148\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_mAP@.5:.95             : 0.6582804803962642\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_precision              : 0.6444774027130253\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_recall                 : 0.9444444444444444\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_support                : 18\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_true_positives         : 17.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_f1                     : 0.4011955365851299\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_false_positives        : 952.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5                 : 0.3169284711384194\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5:.95             : 0.1672078406292376\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_precision              : 0.45878966918302405\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_recall                 : 0.35644876325088337\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_support                : 2264\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_true_positives         : 807.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_f1                        : 0.6963314302410197\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_false_positives           : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_mAP@.5                    : 0.7889316389132338\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_mAP@.5:.95                : 0.6925522173531988\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_precision                 : 0.5980162010863765\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_recall                    : 0.8333333333333334\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_support                   : 6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_true_positives            : 5.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_f1                     : 0.5011354961993795\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_false_positives        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_mAP@.5                 : 0.5667776936836478\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_mAP@.5:.95             : 0.4264738825378352\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_precision              : 0.5583670536303805\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_recall                 : 0.45454545454545453\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_support                : 11\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_true_positives         : 5.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_f1                      : 0.565066061785513\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_false_positives         : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_mAP@.5                  : 0.5498520427644137\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_mAP@.5:.95              : 0.3000079315843729\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_precision               : 0.6495998726901505\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_recall                  : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_support                 : 10\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_true_positives          : 5.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_f1                        : 0.23470589367074798\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_false_positives           : 26.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_mAP@.5                    : 0.11956612980997126\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_mAP@.5:.95                : 0.06317174231326192\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_precision                 : 0.3151656635298171\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_recall                    : 0.18697291041041042\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_support                   : 64\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_true_positives            : 12.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_f1              : 0.1867116526580157\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_false_positives : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_mAP@.5          : 0.3278322580645161\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_mAP@.5:.95      : 0.15790859632881396\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_precision       : 0.5842130790584399\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_recall          : 0.1111111111111111\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_support         : 18\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_true_positives  : 2.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_f1               : 0.34644377602392534\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_false_positives  : 6.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_mAP@.5           : 0.373469387755102\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_mAP@.5:.95       : 0.2987755102040816\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_precision        : 0.2650451203082782\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_recall           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_support          : 4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_true_positives   : 2.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [160]          : (0.03147359937429428, 0.0847574919462204)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [160]          : (0.004073713906109333, 0.06219187378883362)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [160]          : (0.023341001942753792, 0.0425889790058136)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_f1                      : 0.4373136585764427\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_false_positives         : 21.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5                  : 0.43455304228813263\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5:.95              : 0.3237388247702733\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_precision               : 0.3996885774663553\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_recall                  : 0.4827586206896552\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_support                 : 29\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_true_positives          : 14.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [160]            : (0.051028188318014145, 0.06433000415563583)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [160]            : (0.011662562377750874, 0.035430461168289185)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [160]            : (0.023311490193009377, 0.029570478945970535)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_f1                        : 0.5995494302380203\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_false_positives           : 20.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5                    : 0.6277892792940124\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5:.95                : 0.4197056989018571\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_precision                 : 0.5119231842117157\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_recall                    : 0.723369003633127\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_support                   : 29\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_true_positives            : 21.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [160]                   : (0.00034750000000000026, 0.07011406844106464)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [160]                   : (0.00034750000000000026, 0.009740139416983522)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [160]                   : (0.00034750000000000026, 0.009740139416983522)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : k_v_2_openimages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/exp-100epoch/38bf277a51dc46498233dcfa393473b5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_enable          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_lambda          : 0.0001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_temperature     : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.225\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/k_v_2_openimages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (2.39 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     model graph                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n"
     ]
    }
   ],
   "source": [
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_Lwf.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_openimages.yaml \\\n",
    "--data data/openimages.yaml \\\n",
    "--epochs 80 \\\n",
    "--weights ./runs/train/increment_VOC_plain/weights/last.pt \\\n",
    "--name k_v_2_openimages \\\n",
    "\"\"\"\n",
    "!{command}\n",
    "#43分钟"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "05ef18a8-fd24-4b64-ba63-40014359ce0e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/openimages.yaml, weights=['runs/train/k_v_2_openimages/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/test.cache... 600 ima\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        600       1621      0.708      0.397       0.45      0.318\n",
      "                   car        600        113      0.649      0.416      0.463      0.321\n",
      "                   van        600          6      0.325      0.167      0.204      0.155\n",
      "                 truck        600         17        0.9      0.527      0.709      0.566\n",
      "                person        600       1131      0.537       0.26      0.325      0.176\n",
      "               bicycle        600         43       0.87      0.372      0.497        0.3\n",
      "                  bird        600         61      0.834      0.493      0.588      0.449\n",
      "                  boat        600         82      0.851      0.354      0.516      0.291\n",
      "                bottle        600          1          0          0          0          0\n",
      "                   bus        600          3      0.601      0.333      0.339      0.305\n",
      "                   cat        600          5          1          0      0.525      0.354\n",
      "                 chair        600         12      0.528      0.333      0.327      0.175\n",
      "                   dog        600         25      0.741        0.6      0.723      0.514\n",
      "                 horse        600         37      0.878      0.568      0.733      0.468\n",
      "                 sheep        600          8      0.842      0.625      0.637      0.533\n",
      "                 train        600          2          1          0          0          0\n",
      "             billboard        600          3          1          0          0          0\n",
      "                rabbit        600          1      0.315          1      0.332      0.298\n",
      "                monkey        600         16       0.88       0.75      0.862       0.51\n",
      "                   pig        600          7      0.557      0.857      0.829      0.635\n",
      "                   toy        600         42      0.536     0.0714      0.196     0.0771\n",
      "         traffic light        600          5          1          0      0.111     0.0774\n",
      "          traffic sign        600          1      0.734          1      0.995      0.796\n",
      "Speed: 0.1ms pre-process, 2.9ms inference, 1.0ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp221\u001b[0m\n",
      "openimages\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_VOC.yaml, weights=['runs/train/k_v_2_openimages/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007... 4952 images, 0 b\u001b[0m\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mNew cache created: /root/autodl-tmp/datasets/VOC/labels/test2007.cache\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.763      0.258      0.328      0.206\n",
      "                   car       4952       1201       0.74        0.7       0.75      0.539\n",
      "                person       4952       4528      0.479      0.515      0.463      0.265\n",
      "             aeroplane       4952        285          1          0          0          0\n",
      "               bicycle       4952        337      0.638       0.59      0.616       0.39\n",
      "                  bird       4952        459      0.749      0.521        0.6      0.353\n",
      "                  boat       4952        263      0.361      0.589      0.477      0.255\n",
      "                bottle       4952        469      0.881     0.0788      0.173      0.102\n",
      "                   bus       4952        213      0.776      0.484      0.621      0.472\n",
      "                   cat       4952        358          1          0      0.576      0.352\n",
      "                 chair       4952        756      0.533        0.2      0.262      0.149\n",
      "                   cow       4952        244          1          0     0.0503     0.0302\n",
      "           diningtable       4952        206          1          0          0          0\n",
      "                   dog       4952        489      0.735      0.454      0.592      0.357\n",
      "                 horse       4952        348       0.84      0.632      0.725      0.455\n",
      "             motorbike       4952        325          1          0          0          0\n",
      "           pottedplant       4952        480          1          0          0          0\n",
      "                 sheep       4952        242      0.536      0.401      0.437      0.271\n",
      "                  sofa       4952        239          1          0          0          0\n",
      "                 train       4952        282          1          0      0.221      0.138\n",
      "             tvmonitor       4952        308          0          0          0          0\n",
      "Speed: 0.1ms pre-process, 1.4ms inference, 0.8ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp222\u001b[0m\n",
      "Voc\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_kitti.yaml, weights=['runs/train/k_v_2_openimages/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.724      0.141      0.147     0.0754\n",
      "                   car       2244       8711      0.729      0.482      0.566      0.287\n",
      "                   van       2244        861      0.452     0.0825      0.142     0.0888\n",
      "                 truck       2244        333      0.246      0.171      0.154      0.092\n",
      "                  tram       2244        138          1          0          0          0\n",
      "                person       2244       1286      0.364      0.396      0.315      0.136\n",
      "        person_sitting       2244         89          1          0          0          0\n",
      "               cyclist       2244        496          1          0          0          0\n",
      "                  misc       2244        284          1          0          0          0\n",
      "Speed: 0.0ms pre-process, 0.9ms inference, 0.7ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp223\u001b[0m\n",
      "kitti\n"
     ]
    }
   ],
   "source": [
    "# openimages\n",
    "model = f'runs/train/k_v_2_openimages/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/openimages.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'openimages' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "\n",
    "# Voc\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/val_VOC.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Voc' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "# kitti\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/val_kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'kitti' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a456f98b-cf91-4b87-a73e-fe3d80c24b22",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "01f2e5e2-a52e-4000-85ff-87b47afd5b8b",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "caba60d9-7084-4cea-9a7b-155a7ebc1298",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "50bfb278-2aeb-4f12-bcf9-2cf2ce7949b4",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_Lwf: \u001b[0mweights=./runs/train/increment_VOC_Lwf/weights/last.pt, cfg=models/yolov5s_openimages.yaml, data=data/openimages.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=80, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=k_vLwf_2_openimages, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=False, Lwf_lambda=0.0001, Lwf_temperature=1.0\n",
      "Command 'git fetch ultralytics' timed out after 5 seconds\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/exp-100epoch/a2320291aeff42af8ceba1ce09e3631b\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1    110583  models.yolo.Detect                      [36, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7116727 parameters, 7116727 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/355 items from runs/train/increment_VOC_Lwf/weights/last.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 66 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/train.cache... 4200 \u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/val.cache... 1200 imag\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.02 anchors/target, 0.998 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/k_vLwf_2_openimages/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/k_vLwf_2_openimages\u001b[0m\n",
      "Starting training for 80 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/79      3.63G    0.08343    0.04188     0.0595         40        640: 1\n",
      "tensor([1.05997], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.866     0.0803     0.0576     0.0272\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/79      5.74G    0.06279    0.03853     0.0352        102        640:  fatal: unable to access 'https://github.com/ultralytics/yolov5/': GnuTLS recv error (-110): The TLS connection was non-properly terminated.\n",
      "       1/79      5.74G    0.06251     0.0371     0.0314         63        640: 1\n",
      "tensor([1.24249], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.748      0.157      0.153     0.0777\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/79      5.74G     0.0592     0.0362    0.02461         57        640: 1\n",
      "tensor([0.92143], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.733      0.182      0.207      0.115\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/79      5.74G    0.05585    0.03652    0.02098         42        640: 1\n",
      "tensor([0.80333], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.61      0.237      0.193      0.108\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/79      5.74G    0.05342    0.03565    0.01873         36        640: 1\n",
      "tensor([0.87604], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.492      0.279      0.237      0.136\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/79      5.74G    0.05201    0.03652    0.01681         39        640: 1\n",
      "tensor([0.81362], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.616      0.299      0.271      0.153\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/79      5.74G     0.0512    0.03566    0.01506         68        640: 1\n",
      "tensor([1.13819], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.558      0.301      0.254      0.145\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/79      5.74G    0.05054    0.03549     0.0152         31        640: 1\n",
      "tensor([0.64652], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.524      0.319      0.281      0.167\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/79      5.74G       0.05    0.03521    0.01455         35        640: 1\n",
      "tensor([0.76071], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.489      0.307       0.27       0.16\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/79      5.74G    0.04925    0.03504    0.01313         42        640: 1\n",
      "tensor([0.76499], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.51      0.374      0.287      0.172\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/79      5.74G    0.04851    0.03435    0.01269         38        640: 1\n",
      "tensor([0.76508], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.529      0.323      0.283      0.173\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/79      5.74G    0.04812    0.03469    0.01226         59        640: 1\n",
      "tensor([0.90199], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.516      0.385      0.287      0.176\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/79      5.74G    0.04738    0.03424    0.01244         46        640: 1\n",
      "tensor([0.78795], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.527      0.349      0.326      0.197\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/79      5.74G    0.04759    0.03361    0.01141         47        640: 1\n",
      "tensor([0.82410], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.555      0.329      0.302      0.185\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/79      5.74G    0.04659    0.03338    0.01145         32        640: 1\n",
      "tensor([0.62183], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.454      0.344      0.319      0.204\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/79      5.74G    0.04626    0.03367    0.01044         48        640: 1\n",
      "tensor([0.87505], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.492      0.342      0.321        0.2\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/79      5.74G     0.0459     0.0328    0.01047         43        640: 1\n",
      "tensor([0.74690], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.436      0.383      0.311      0.187\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/79      5.74G     0.0459    0.03319    0.01026         64        640: 1\n",
      "tensor([0.82156], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.515      0.342      0.329      0.199\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/79      5.74G    0.04536    0.03292    0.01011         61        640: 1\n",
      "tensor([0.79487], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.57      0.356      0.334      0.209\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/79      5.74G    0.04477    0.03304   0.009525         29        640: 1\n",
      "tensor([0.64672], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.536      0.368      0.315      0.198\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/79      5.74G    0.04439    0.03159   0.009198         39        640: 1\n",
      "tensor([0.75520], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.496       0.34      0.333      0.205\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/79      5.74G    0.04368    0.03193   0.009449         40        640: 1\n",
      "tensor([0.60434], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.49      0.366      0.321      0.194\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/79      5.74G     0.0434    0.03169   0.009223         44        640: 1\n",
      "tensor([0.76771], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.511      0.397      0.336      0.211\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/79      5.74G    0.04324    0.03197   0.008929         31        640: 1\n",
      "tensor([0.60179], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.498      0.354       0.36      0.229\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/79      5.74G      0.043    0.03144   0.009111         72        640: 1\n",
      "tensor([0.72938], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.524      0.376      0.346      0.216\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/79      5.74G    0.04258    0.03169   0.009069         41        640: 1\n",
      "tensor([0.62680], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.536      0.373      0.381      0.251\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/79      5.74G    0.04255    0.03131   0.008448         33        640: 1\n",
      "tensor([0.76294], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.67      0.319      0.361      0.237\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/79      5.74G    0.04241     0.0308   0.008337         30        640: 1\n",
      "tensor([0.57734], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.509      0.368      0.362      0.222\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/79      5.74G    0.04222    0.03063   0.008138         30        640: 1\n",
      "tensor([0.52535], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.503      0.369      0.377       0.25\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/79      5.74G    0.04155    0.03035   0.007932         43        640: 1\n",
      "tensor([0.69943], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.456      0.357      0.349      0.221\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/79      5.74G     0.0418    0.03012   0.007529         62        640: 1\n",
      "tensor([0.86118], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.558      0.338      0.364       0.24\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/79      5.74G    0.04143    0.03017   0.007965         40        640: 1\n",
      "tensor([0.58587], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.401      0.386      0.336      0.215\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/79      5.74G    0.04109     0.0299   0.007509         37        640: 1\n",
      "tensor([0.47230], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.492      0.396      0.387      0.246\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/79      5.74G    0.04039     0.0293   0.007812         45        640: 1\n",
      "tensor([0.72482], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.475      0.374      0.387      0.244\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/79      5.74G    0.04054    0.02986   0.007101         70        640: 1\n",
      "tensor([0.66123], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.581      0.347      0.376      0.225\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/79      5.74G    0.04025    0.02926   0.007131         34        640: 1\n",
      "tensor([0.56223], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.504       0.37      0.375      0.241\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/79      5.74G     0.0397    0.02907    0.00728         52        640: 1\n",
      "tensor([0.66335], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.54      0.373      0.368      0.231\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/79      5.74G    0.03969    0.02906   0.007207         46        640: 1\n",
      "tensor([0.61840], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.514      0.379      0.374      0.242\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/79      5.74G    0.03875    0.02851   0.006784         42        640: 1\n",
      "tensor([0.61116], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.51      0.397      0.381      0.249\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/79      5.74G    0.03927    0.02852   0.006644         56        640: 1\n",
      "tensor([0.71512], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.569      0.395      0.371      0.251\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/79      5.74G    0.03928    0.02851    0.00681         47        640: 1\n",
      "tensor([0.71762], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.52      0.381      0.378      0.257\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/79      5.74G     0.0388    0.02832   0.006487         30        640: 1\n",
      "tensor([0.47519], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.558      0.377      0.379      0.251\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/79      5.74G    0.03844    0.02799   0.006478         31        640: 1\n",
      "tensor([0.47536], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.658      0.412      0.407      0.273\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/79      5.74G    0.03879    0.02837    0.00613         16        640: 1\n",
      "tensor([0.43192], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.55      0.399      0.364      0.237\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/79      5.74G     0.0381      0.028   0.006083         39        640: 1\n",
      "tensor([0.60975], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.545      0.377      0.395      0.255\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/79      5.74G    0.03749    0.02763   0.006079         45        640: 1\n",
      "tensor([0.56055], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.661      0.375      0.382      0.257\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/79      5.74G    0.03737    0.02774   0.006253         50        640: 1\n",
      "tensor([0.57566], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.51      0.418      0.387      0.255\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/79      5.74G    0.03778    0.02732   0.005801         28        640: 1\n",
      "tensor([0.55349], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.566      0.395      0.386      0.252\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/79      5.74G    0.03681    0.02739   0.005857         36        640: 1\n",
      "tensor([0.55980], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.587      0.399      0.392      0.254\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/79      5.74G    0.03682    0.02696   0.005716         66        640: 1\n",
      "tensor([0.73071], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.563      0.434      0.407      0.265\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      50/79      5.74G     0.0365    0.02677   0.005773         54        640: 1\n",
      "tensor([0.63653], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.596      0.387      0.389      0.251\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      51/79      5.74G    0.03641    0.02678   0.005471         41        640: 1\n",
      "tensor([0.61413], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.553      0.403      0.395      0.258\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      52/79      5.74G    0.03616    0.02637   0.005476         45        640: 1\n",
      "tensor([0.52492], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.555      0.403      0.402      0.267\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      53/79      5.74G    0.03549    0.02632   0.005372         67        640: 1\n",
      "tensor([0.61270], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.542      0.414      0.409      0.262\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      54/79      5.74G    0.03561    0.02628   0.005549         61        640: 1\n",
      "tensor([0.61410], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.616       0.39      0.397      0.265\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      55/79      5.74G    0.03517    0.02541   0.005098         55        640: 1\n",
      "tensor([0.70058], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.57      0.404      0.407      0.265\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      56/79      5.74G    0.03535     0.0257   0.005171         34        640: 1\n",
      "tensor([0.51350], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.54      0.402      0.404      0.267\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      57/79      5.74G    0.03489     0.0258   0.004995         36        640: 1\n",
      "tensor([0.54338], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.588      0.405      0.393      0.266\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      58/79      5.74G    0.03477    0.02584   0.005084         52        640: 1\n",
      "tensor([0.59713], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.445      0.435      0.395      0.264\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      59/79      5.74G    0.03457    0.02505   0.004913         31        640: 1\n",
      "tensor([0.44029], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.518      0.439       0.42      0.283\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      60/79      5.74G    0.03426    0.02512   0.004851         49        640: 1\n",
      "tensor([0.55936], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.677      0.371      0.408      0.279\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      61/79      5.74G     0.0343    0.02478   0.004922         52        640: 1\n",
      "tensor([0.59926], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.619       0.39      0.393      0.266\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      62/79      5.74G     0.0341    0.02513   0.004787         41        640: 1\n",
      "tensor([0.61007], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.507      0.378      0.397      0.274\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      63/79      5.74G     0.0338     0.0245   0.004837         34        640: 1\n",
      "tensor([0.42397], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.569      0.392       0.39      0.267\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      64/79      5.74G    0.03337    0.02404   0.004707         37        640: 1\n",
      "tensor([0.52908], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.584      0.395       0.42      0.282\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      65/79      5.74G    0.03344     0.0244   0.004454         33        640: 1\n",
      "tensor([0.51669], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.62      0.393      0.405      0.276\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      66/79      5.74G    0.03329    0.02417   0.004624         20        640: 1\n",
      "tensor([0.48225], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.537      0.419       0.41      0.279\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      67/79      5.74G    0.03266    0.02443   0.004556         49        640: 1\n",
      "tensor([0.58084], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.564      0.401      0.405       0.28\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      68/79      5.74G    0.03241    0.02397   0.004394         93        640: 1\n",
      "tensor([0.75147], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.465      0.433      0.407      0.276\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      69/79      5.74G    0.03248    0.02405   0.004173         32        640: 1\n",
      "tensor([0.39181], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.55      0.402      0.417      0.289\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      70/79      5.74G    0.03207    0.02365   0.004463         47        640: 1\n",
      "tensor([0.54560], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.598      0.394      0.411      0.281\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      71/79      5.74G    0.03204    0.02393   0.004265         43        640: 1\n",
      "tensor([0.44075], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.595      0.402      0.413      0.284\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      72/79      5.74G    0.03186    0.02335   0.003952         37        640: 1\n",
      "tensor([0.46623], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.595      0.369       0.41      0.282\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      73/79      5.74G    0.03193    0.02347   0.004134         53        640: 1\n",
      "tensor([0.58046], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.665      0.385      0.412       0.28\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      74/79      5.74G    0.03181    0.02362   0.004074         59        640: 1\n",
      "tensor([0.55128], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.635      0.388      0.406      0.279\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      75/79      5.74G    0.03113    0.02287     0.0041         26        640: 1\n",
      "tensor([0.31168], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.631       0.39      0.402      0.273\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      76/79      5.74G     0.0314     0.0231   0.003943         54        640: 1\n",
      "tensor([0.57076], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.69      0.374      0.404      0.278\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      77/79      5.74G    0.03093    0.02265   0.003918         31        640: 1\n",
      "tensor([0.48577], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.581      0.409      0.412      0.284\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      78/79      5.74G    0.03107    0.02292   0.003835         56        640: 1\n",
      "tensor([0.55356], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.642      0.408       0.42      0.292\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      79/79      5.74G     0.0307    0.02246   0.004057         45        640: 1\n",
      "tensor([0.43482], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.56      0.424      0.418      0.287\n",
      "\n",
      "80 epochs completed in 0.696 hours.\n",
      "Optimizer stripped from runs/train/k_vLwf_2_openimages/weights/last.pt, 14.6MB\n",
      "Optimizer stripped from runs/train/k_vLwf_2_openimages/weights/best.pt, 14.6MB\n",
      "\n",
      "Validating runs/train/k_vLwf_2_openimages/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.646      0.404       0.42      0.292\n",
      "                   car       1200        287      0.739      0.589      0.592      0.399\n",
      "                   van       1200         29      0.469      0.621      0.565      0.425\n",
      "                 truck       1200         29      0.386      0.517      0.469      0.338\n",
      "                person       1200       2264      0.477      0.347      0.312      0.163\n",
      "               bicycle       1200         54      0.561      0.389      0.409      0.241\n",
      "                  bird       1200        136      0.629      0.566      0.557      0.349\n",
      "                  boat       1200        145      0.541      0.407      0.445      0.264\n",
      "                bottle       1200         31          1          0          0          0\n",
      "                   bus       1200         15      0.707      0.806      0.829      0.669\n",
      "                   cat       1200          1          1          0     0.0663     0.0199\n",
      "                 chair       1200         21     0.0923     0.0969     0.0599     0.0268\n",
      "                   dog       1200         42      0.866      0.452      0.603      0.376\n",
      "                 horse       1200         44      0.835      0.575      0.702      0.451\n",
      "                 sheep       1200         10      0.264        0.3      0.249      0.154\n",
      "             billboard       1200          4          1          0          0          0\n",
      "                rabbit       1200         11       0.83      0.445      0.617      0.432\n",
      "                monkey       1200         18      0.774      0.944       0.85      0.638\n",
      "                   pig       1200          6      0.537      0.833      0.729      0.658\n",
      "                   toy       1200         64      0.342      0.125      0.142     0.0779\n",
      "         traffic light       1200         18          1      0.222      0.363      0.211\n",
      "          traffic sign       1200          4      0.512       0.25      0.265      0.235\n",
      "Results saved to \u001b[1mruns/train/k_vLwf_2_openimages\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : k_vLwf_2_openimages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/exp-100epoch/a2320291aeff42af8ceba1ce09e3631b\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_f1                    : 0.45920015391186053\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_false_positives       : 16.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5                : 0.4089484106082936\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5:.95            : 0.24061575134888943\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_precision             : 0.5605473336933104\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_recall                : 0.3888888888888889\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_support               : 54\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_true_positives        : 21.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_f1                  : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_false_positives     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_mAP@.5              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_mAP@.5:.95          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_precision           : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_recall              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_support             : 4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_true_positives      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_f1                       : 0.5961010100763148\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_false_positives          : 45.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_mAP@.5                   : 0.5574024949649484\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_mAP@.5:.95               : 0.3486557742689347\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_precision                : 0.6293653158631805\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_recall                   : 0.5661764705882353\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_support                  : 136\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_true_positives           : 77.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_f1                       : 0.46428948662458785\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_false_positives          : 50.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_mAP@.5                   : 0.4452615485923257\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_mAP@.5:.95               : 0.2642183292626098\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_precision                : 0.540531719889518\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_recall                   : 0.4068965517241379\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_support                  : 145\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_true_positives           : 59.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_f1                     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_false_positives        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_mAP@.5                 : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_mAP@.5:.95             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_precision              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_recall                 : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_support                : 31\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_true_positives         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_f1                        : 0.7536345638751254\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_false_positives           : 5.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5                    : 0.8293034274020189\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5:.95                : 0.6691938192026374\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_precision                 : 0.7074266567187351\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_recall                    : 0.806300759412439\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_support                   : 15\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_true_positives            : 12.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_f1                        : 0.6553397427876682\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_false_positives           : 60.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5                    : 0.5920970903999454\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5:.95                : 0.3994065483008674\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_precision                 : 0.7387557896621249\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_recall                    : 0.5888501742160279\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_support                   : 287\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_true_positives            : 169.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_f1                        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_false_positives           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_mAP@.5                    : 0.06633333333333333\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_mAP@.5:.95                : 0.019899999999999998\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_precision                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_recall                    : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_support                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_true_positives            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_f1                      : 0.09453744877372236\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_false_positives         : 20.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_mAP@.5                  : 0.05992704421334666\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_mAP@.5:.95              : 0.026755627725903425\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_precision               : 0.09228797482039282\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_recall                  : 0.09689932204552087\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_support                 : 21\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_true_positives          : 2.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_f1                        : 0.5942758891256794\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_false_positives           : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_mAP@.5                    : 0.6033958217084059\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_mAP@.5:.95                : 0.3760505246199865\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_precision                 : 0.8658653825320493\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_recall                    : 0.4523809523809524\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_support                   : 42\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_true_positives            : 19.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_f1                      : 0.6810927059466866\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_false_positives         : 5.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_mAP@.5                  : 0.7020109541024501\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_mAP@.5:.95              : 0.4513605442509278\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_precision               : 0.8349711718887346\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_recall                  : 0.5751054084387418\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_support                 : 44\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_true_positives          : 25.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [2104]                   : (0.48225218057632446, 3.9672205448150635)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [160]         : (0.05763712346356285, 0.42042299154076596)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [160]    : (0.027215467006195662, 0.29174712497188576)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [160]       : (0.4011087103563871, 0.8662364440405455)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [160]          : (0.08025311422055227, 0.4386120328099614)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_f1                     : 0.8505595640431651\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_false_positives        : 5.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_mAP@.5                 : 0.8495847457627119\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_mAP@.5:.95             : 0.6376440584403429\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_precision              : 0.7736526704078326\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_recall                 : 0.9444444444444444\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_support                : 18\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_true_positives         : 17.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_f1                     : 0.40184522344086193\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_false_positives        : 862.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5                 : 0.31213799920611696\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5:.95             : 0.16345916120268536\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_precision              : 0.4769550816843335\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_recall                 : 0.3471731448763251\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_support                : 2264\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_true_positives         : 786.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_f1                        : 0.6529316560112209\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_false_positives           : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_mAP@.5                    : 0.7285507246376813\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_mAP@.5:.95                : 0.6575465414273995\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_precision                 : 0.536737616461408\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_recall                    : 0.8333333333333334\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_support                   : 6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_true_positives            : 5.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_f1                     : 0.5794364366635634\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_false_positives        : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_mAP@.5                 : 0.617319737919738\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_mAP@.5:.95             : 0.43199451924651927\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_precision              : 0.8298742368314703\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_recall                 : 0.445111554994919\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_support                : 11\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_true_positives         : 5.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_f1                      : 0.2808489009626992\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_false_positives         : 8.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_mAP@.5                  : 0.24892741484390485\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_mAP@.5:.95              : 0.1535399407872054\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_precision               : 0.26399617780718504\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_recall                  : 0.3\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_support                 : 10\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_true_positives          : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_f1                        : 0.18303985542924286\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_false_positives           : 15.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_mAP@.5                    : 0.1420099429501945\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_mAP@.5:.95                : 0.07792004311677664\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_precision                 : 0.3416955276205831\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_recall                    : 0.125\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_support                   : 64\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_true_positives            : 8.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_f1              : 0.36341385523015984\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_false_positives : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_mAP@.5          : 0.3634773565773567\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_mAP@.5:.95      : 0.2110312810462811\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_precision       : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_recall          : 0.2220560502675331\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_support         : 18\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_true_positives  : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_f1               : 0.3358945282279004\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_false_positives  : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_mAP@.5           : 0.2651766513056836\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_mAP@.5:.95       : 0.23522683051715312\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_precision        : 0.5117052536407376\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_recall           : 0.25\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_support          : 4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_true_positives   : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [160]          : (0.03069774992763996, 0.08343411982059479)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [160]          : (0.003834960050880909, 0.05950014293193817)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [160]          : (0.022456154227256775, 0.04187771677970886)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_f1                      : 0.4423738490972497\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_false_positives         : 24.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5                  : 0.4693710888789906\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5:.95              : 0.33754914101429107\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_precision               : 0.38643914353872705\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_recall                  : 0.5172413793103449\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_support                 : 29\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_true_positives          : 15.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [160]            : (0.052474625408649445, 0.0644683763384819)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [160]            : (0.012467478401958942, 0.03387990593910217)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [160]            : (0.02354295551776886, 0.03135058656334877)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_f1                        : 0.5342672546301831\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_false_positives           : 20.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5                    : 0.5648416289342488\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5:.95                : 0.4254515852380562\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_precision                 : 0.46896974159374766\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_recall                    : 0.6206896551724138\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_support                   : 29\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_true_positives            : 18.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [160]                   : (0.00034750000000000026, 0.07011406844106464)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [160]                   : (0.00034750000000000026, 0.009740139416983522)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [160]                   : (0.00034750000000000026, 0.009740139416983522)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : k_vLwf_2_openimages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/exp-100epoch/a2320291aeff42af8ceba1ce09e3631b\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_enable          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_lambda          : 0.0001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_temperature     : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.225\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/k_vLwf_2_openimages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (2.39 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     model graph                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Please wait for assets to finish uploading (timeout is 10800 seconds)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Still uploading 1 file(s), remaining 329.83 KB/329.83 KB\n"
     ]
    }
   ],
   "source": [
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_Lwf.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_openimages.yaml \\\n",
    "--data data/openimages.yaml \\\n",
    "--epochs 80 \\\n",
    "--weights ./runs/train/increment_VOC_Lwf/weights/last.pt \\\n",
    "--name k_vLwf_2_openimages \\\n",
    "\"\"\"\n",
    "!{command}\n",
    "#43分钟"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "05a139f0-ecd9-4988-b201-380fed168b74",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/openimages.yaml, weights=['runs/train/k_vLwf_2_openimages/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/test.cache... 600 ima\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        600       1621      0.693      0.407      0.408      0.287\n",
      "                   car        600        113      0.668      0.416      0.493      0.332\n",
      "                   van        600          6      0.249      0.167     0.0884     0.0652\n",
      "                 truck        600         17      0.771      0.588      0.724      0.565\n",
      "                person        600       1131      0.549      0.286      0.328      0.173\n",
      "               bicycle        600         43      0.676      0.291      0.419      0.263\n",
      "                  bird        600         61      0.769      0.492      0.575       0.41\n",
      "                  boat        600         82      0.793      0.373      0.489      0.298\n",
      "                bottle        600          1          1          0          0          0\n",
      "                   bus        600          3      0.785      0.333      0.339      0.305\n",
      "                   cat        600          5          1          0      0.289      0.231\n",
      "                 chair        600         12      0.427      0.312      0.295      0.151\n",
      "                   dog        600         25      0.804      0.657      0.707      0.513\n",
      "                 horse        600         37      0.845      0.591      0.724      0.444\n",
      "                 sheep        600          8      0.676        0.5      0.602       0.45\n",
      "                 train        600          2          1          0          0          0\n",
      "             billboard        600          3          1          0          0          0\n",
      "                rabbit        600          1      0.185          1      0.199      0.179\n",
      "                monkey        600         16      0.859      0.812      0.859       0.51\n",
      "                   pig        600          7      0.536      0.714      0.754      0.618\n",
      "                   toy        600         42        0.3     0.0238      0.156     0.0927\n",
      "         traffic light        600          5      0.935        0.4      0.439      0.323\n",
      "          traffic sign        600          1      0.417          1      0.497      0.398\n",
      "Speed: 0.1ms pre-process, 2.7ms inference, 1.1ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp224\u001b[0m\n",
      "openimages\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_VOC.yaml, weights=['runs/train/k_vLwf_2_openimages/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 image\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.798      0.238        0.3      0.186\n",
      "                   car       4952       1201      0.725      0.693      0.739      0.526\n",
      "                person       4952       4528      0.475      0.472      0.431      0.242\n",
      "             aeroplane       4952        285          1          0          0          0\n",
      "               bicycle       4952        337      0.615      0.543      0.576      0.353\n",
      "                  bird       4952        459      0.699      0.458      0.544      0.315\n",
      "                  boat       4952        263      0.354      0.506      0.422      0.224\n",
      "                bottle       4952        469      0.823     0.0597       0.12     0.0648\n",
      "                   bus       4952        213       0.75      0.465      0.578       0.43\n",
      "                   cat       4952        358          1          0      0.529      0.346\n",
      "                 chair       4952        756      0.499      0.136      0.183     0.0967\n",
      "                   cow       4952        244          1          0        0.1     0.0803\n",
      "           diningtable       4952        206          1          0          0          0\n",
      "                   dog       4952        489      0.685      0.401      0.515      0.306\n",
      "                 horse       4952        348      0.798      0.603      0.685        0.4\n",
      "             motorbike       4952        325          1          0          0          0\n",
      "           pottedplant       4952        480          1          0          0          0\n",
      "                 sheep       4952        242       0.54      0.426       0.43      0.243\n",
      "                  sofa       4952        239          1          0          0          0\n",
      "                 train       4952        282          1          0      0.147     0.0903\n",
      "             tvmonitor       4952        308          1          0          0          0\n",
      "Speed: 0.1ms pre-process, 1.4ms inference, 0.7ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp225\u001b[0m\n",
      "Voc\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_kitti.yaml, weights=['runs/train/k_vLwf_2_openimages/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.625       0.17      0.163     0.0827\n",
      "                   car       2244       8711       0.67      0.547      0.596      0.297\n",
      "                   van       2244        861      0.667      0.113      0.213      0.131\n",
      "                 truck       2244        333      0.258      0.263      0.155     0.0876\n",
      "                  tram       2244        138          1          0          0          0\n",
      "                person       2244       1286      0.403      0.441      0.342      0.145\n",
      "        person_sitting       2244         89          1          0          0          0\n",
      "               cyclist       2244        496          1          0          0          0\n",
      "                  misc       2244        284          0          0          0          0\n",
      "Speed: 0.0ms pre-process, 1.0ms inference, 0.7ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp226\u001b[0m\n",
      "kitti\n"
     ]
    }
   ],
   "source": [
    "# openimages\n",
    "model = f'runs/train/k_vLwf_2_openimages/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/openimages.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'openimages' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "\n",
    "# Voc\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/val_VOC.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Voc' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "# kitti\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/val_kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'kitti' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "97c8b8f5-3814-45bf-bc1e-fb68fa65494c",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1a2daca7-8963-4d9a-844f-3328b8700a19",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "13c400fa-bfce-4c90-a601-2740b75796ea",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "34b66d06-155e-4cb7-b761-ab8bd3389086",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_Lwf: \u001b[0mweights=./runs/train/increment_VOC_plain/weights/last.pt, cfg=models/yolov5s_openimages.yaml, data=data/openimages.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=80, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=Lwf1e-4_k_v_2_openimages, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=True, Lwf_lambda=0.0001, Lwf_temperature=1.0\n",
      "Command 'git fetch ultralytics' timed out after 5 seconds\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/exp-100epoch/e4a002b62d3442aca700cfbc2213834b\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1    110583  models.yolo.Detect                      [36, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7116727 parameters, 7116727 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/355 items from runs/train/increment_VOC_plain/weights/last.pt\n",
      "Overriding model.yaml nc=36 with nc=26\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83613  models.yolo.Detect                      [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7089757 parameters, 7089757 gradients, 16.2 GFLOPs\n",
      "\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 66 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/train.cache... 4200 \u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/val.cache... 1200 imag\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.02 anchors/target, 0.998 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/Lwf1e-4_k_v_2_openimages/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/Lwf1e-4_k_v_2_openimages\u001b[0m\n",
      "Starting training for 80 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/79      3.51G    0.08549    0.04225    0.05377         40        640: 1\n",
      "tensor([1.82089], device='cuda:0', grad_fn=<AddBackward0>) tensor(7900.62207, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.901     0.0713     0.0818       0.04\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/79      5.62G    0.06394      0.038    0.02918         63        640: 1\n",
      "tensor([1.53560], device='cuda:0', grad_fn=<AddBackward0>) tensor(3197.93799, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.638      0.187      0.207      0.118\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/79      5.62G    0.06068    0.03735    0.02548         94        640:  fatal: unable to access 'https://github.com/ultralytics/yolov5/': Failed to connect to github.com port 443 after 129607 ms: Connection timed out\n",
      "       2/79      5.62G    0.06007    0.03653    0.02471         57        640: 1\n",
      "tensor([1.26517], device='cuda:0', grad_fn=<AddBackward0>) tensor(3405.69043, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.593      0.231      0.253      0.145\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/79      5.62G    0.05612    0.03656    0.02206         42        640: 1\n",
      "tensor([1.12222], device='cuda:0', grad_fn=<AddBackward0>) tensor(3157.71924, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.584      0.288      0.287      0.173\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/79      5.62G    0.05336    0.03533     0.0206         36        640: 1\n",
      "tensor([1.35010], device='cuda:0', grad_fn=<AddBackward0>) tensor(3805.93823, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.536      0.365      0.314      0.175\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/79      5.62G    0.05184    0.03614    0.01897         39        640: 1\n",
      "tensor([1.18859], device='cuda:0', grad_fn=<AddBackward0>) tensor(3634.52197, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.538      0.351      0.326      0.209\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/79      5.62G    0.05081    0.03541    0.01783         68        640: 1\n",
      "tensor([1.59148], device='cuda:0', grad_fn=<AddBackward0>) tensor(4313.93311, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.556      0.369      0.328      0.209\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/79      5.62G     0.0501    0.03517    0.01805         31        640: 1\n",
      "tensor([1.19304], device='cuda:0', grad_fn=<AddBackward0>) tensor(4923.98486, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.528      0.373      0.338       0.22\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/79      5.62G    0.04949      0.035    0.01718         35        640: 1\n",
      "tensor([1.12691], device='cuda:0', grad_fn=<AddBackward0>) tensor(3915.95435, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.531      0.385      0.334      0.219\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/79      5.62G    0.04883    0.03475    0.01654         42        640: 1\n",
      "tensor([1.27884], device='cuda:0', grad_fn=<AddBackward0>) tensor(4326.62695, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.52      0.396      0.345      0.225\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/79      5.62G    0.04839    0.03417    0.01605         38        640: 1\n",
      "tensor([1.18729], device='cuda:0', grad_fn=<AddBackward0>) tensor(4177.15820, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.588       0.34      0.346      0.223\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/79      5.62G    0.04764    0.03454    0.01533         59        640: 1\n",
      "tensor([1.36472], device='cuda:0', grad_fn=<AddBackward0>) tensor(4282.60107, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.546      0.401       0.35      0.232\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/79      5.62G    0.04695    0.03407    0.01581         46        640: 1\n",
      "tensor([1.21113], device='cuda:0', grad_fn=<AddBackward0>) tensor(4409.49902, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.574      0.379      0.361      0.231\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/79      5.62G    0.04691    0.03369    0.01469         47        640: 1\n",
      "tensor([1.26319], device='cuda:0', grad_fn=<AddBackward0>) tensor(4414.86523, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.493      0.375      0.354      0.235\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/79      5.62G    0.04636    0.03321    0.01476         32        640: 1\n",
      "tensor([1.01533], device='cuda:0', grad_fn=<AddBackward0>) tensor(4500.01758, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.593       0.38      0.366      0.244\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/79      5.62G    0.04599    0.03375    0.01399         48        640: 1\n",
      "tensor([1.24264], device='cuda:0', grad_fn=<AddBackward0>) tensor(4101.37402, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.609      0.366       0.38      0.247\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/79      5.62G    0.04594    0.03297    0.01422         43        640: 1\n",
      "tensor([1.12945], device='cuda:0', grad_fn=<AddBackward0>) tensor(3645.48486, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.638      0.377      0.382      0.242\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/79      5.62G    0.04566    0.03338    0.01387         64        640: 1\n",
      "tensor([1.18790], device='cuda:0', grad_fn=<AddBackward0>) tensor(3779.34106, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.587      0.399      0.385      0.246\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/79      5.62G    0.04541    0.03318    0.01389         61        640: 1\n",
      "tensor([1.25004], device='cuda:0', grad_fn=<AddBackward0>) tensor(4581.68945, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.615      0.401      0.395      0.264\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/79      5.62G    0.04483     0.0334    0.01343         29        640: 1\n",
      "tensor([1.14488], device='cuda:0', grad_fn=<AddBackward0>) tensor(4693.20215, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.65      0.365      0.388      0.255\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/79      5.62G    0.04451    0.03194    0.01268         39        640: 1\n",
      "tensor([1.15750], device='cuda:0', grad_fn=<AddBackward0>) tensor(3837.09570, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.598      0.398       0.39      0.263\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/79      5.62G    0.04366     0.0325     0.0133         40        640: 1\n",
      "tensor([1.20098], device='cuda:0', grad_fn=<AddBackward0>) tensor(5267.85010, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.58      0.392      0.377      0.254\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/79      5.62G    0.04386    0.03225    0.01307         44        640: 1\n",
      "tensor([1.19862], device='cuda:0', grad_fn=<AddBackward0>) tensor(4327.02148, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.585      0.396      0.392       0.26\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/79      5.62G     0.0436    0.03248    0.01288         31        640: 1\n",
      "tensor([1.00965], device='cuda:0', grad_fn=<AddBackward0>) tensor(4300.21387, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.556       0.39      0.391       0.25\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/79      5.62G    0.04298    0.03182    0.01246         72        640: 1\n",
      "tensor([1.23391], device='cuda:0', grad_fn=<AddBackward0>) tensor(5007.93066, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.621      0.376      0.398      0.269\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/79      5.62G    0.04301    0.03218    0.01261         41        640: 1\n",
      "tensor([1.14549], device='cuda:0', grad_fn=<AddBackward0>) tensor(4726.55078, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.566      0.382      0.398      0.268\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/79      5.62G    0.04338    0.03206    0.01238         33        640: 1\n",
      "tensor([1.18279], device='cuda:0', grad_fn=<AddBackward0>) tensor(4081.96021, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.611       0.38      0.401      0.261\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/79      5.62G    0.04295     0.0315    0.01198         30        640: 1\n",
      "tensor([1.00961], device='cuda:0', grad_fn=<AddBackward0>) tensor(4571.01562, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.629      0.391      0.395      0.261\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/79      5.62G     0.0428    0.03137    0.01194         30        640: 1\n",
      "tensor([1.00385], device='cuda:0', grad_fn=<AddBackward0>) tensor(4336.08203, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.628      0.392      0.402      0.278\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/79      5.62G    0.04208    0.03118    0.01175         43        640: 1\n",
      "tensor([1.15377], device='cuda:0', grad_fn=<AddBackward0>) tensor(4537.13037, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.642      0.377      0.408      0.277\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/79      5.62G    0.04247    0.03114    0.01136         62        640: 1\n",
      "tensor([1.22207], device='cuda:0', grad_fn=<AddBackward0>) tensor(4238.35059, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.589      0.399        0.4       0.27\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/79      5.62G    0.04213    0.03097    0.01159         40        640: 1\n",
      "tensor([1.09130], device='cuda:0', grad_fn=<AddBackward0>) tensor(4264.46240, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.661      0.356      0.404      0.277\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/79      5.62G    0.04176    0.03102    0.01133         37        640: 1\n",
      "tensor([1.06538], device='cuda:0', grad_fn=<AddBackward0>) tensor(4784.20508, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.652      0.388      0.407      0.283\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/79      5.62G    0.04118    0.03049    0.01166         45        640: 1\n",
      "tensor([1.19905], device='cuda:0', grad_fn=<AddBackward0>) tensor(4089.02490, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.61        0.4      0.392      0.271\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/79      5.62G    0.04171    0.03124    0.01113         70        640: 1\n",
      "tensor([1.18234], device='cuda:0', grad_fn=<AddBackward0>) tensor(4239.51514, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.578      0.402      0.406      0.271\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/79      5.62G    0.04123    0.03057     0.0112         34        640: 1\n",
      "tensor([1.00431], device='cuda:0', grad_fn=<AddBackward0>) tensor(3879.36279, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.511      0.427      0.403      0.266\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/79      5.62G    0.04077    0.03022     0.0111         52        640: 1\n",
      "tensor([1.13842], device='cuda:0', grad_fn=<AddBackward0>) tensor(4632.35742, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.607      0.389        0.4      0.273\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/79      5.62G     0.0407    0.03041    0.01104         46        640: 1\n",
      "tensor([1.06903], device='cuda:0', grad_fn=<AddBackward0>) tensor(4063.10132, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.639      0.391      0.413       0.28\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/79      5.62G    0.03999    0.02992    0.01092         42        640: 1\n",
      "tensor([1.09119], device='cuda:0', grad_fn=<AddBackward0>) tensor(3543.51831, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.665       0.37      0.424      0.289\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/79      5.62G    0.04058    0.03002    0.01079         56        640: 1\n",
      "tensor([1.12668], device='cuda:0', grad_fn=<AddBackward0>) tensor(3730.60059, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.634      0.375       0.41      0.284\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/79      5.62G    0.04067    0.03005    0.01079         47        640: 1\n",
      "tensor([1.37940], device='cuda:0', grad_fn=<AddBackward0>) tensor(5479.37256, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.594      0.436      0.427      0.289\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/79      5.62G    0.04011    0.03009    0.01049         30        640: 1\n",
      "tensor([0.90620], device='cuda:0', grad_fn=<AddBackward0>) tensor(4058.52173, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.617      0.418      0.417      0.279\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/79      5.62G    0.03988    0.02984    0.01027         31        640: 1\n",
      "tensor([0.89822], device='cuda:0', grad_fn=<AddBackward0>) tensor(4004.89966, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.668      0.384      0.435      0.297\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/79      5.62G    0.04023    0.03025    0.01022         16        640: 1\n",
      "tensor([0.85170], device='cuda:0', grad_fn=<AddBackward0>) tensor(4165.95020, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.67      0.381      0.426      0.287\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/79      5.62G    0.03981    0.03001    0.01033         39        640: 1\n",
      "tensor([0.97918], device='cuda:0', grad_fn=<AddBackward0>) tensor(3548.99780, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.643      0.406      0.434      0.295\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/79      5.62G    0.03922    0.02957   0.009993         45        640: 1\n",
      "tensor([0.90999], device='cuda:0', grad_fn=<AddBackward0>) tensor(3447.77148, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.583      0.413      0.404      0.283\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/79      5.62G    0.03923    0.02994    0.01053         50        640: 1\n",
      "tensor([1.05356], device='cuda:0', grad_fn=<AddBackward0>) tensor(3932.60742, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.599      0.412      0.427      0.291\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/79      5.62G    0.03942    0.02941   0.009889         28        640: 1\n",
      "tensor([0.96441], device='cuda:0', grad_fn=<AddBackward0>) tensor(3987.39233, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.653      0.395      0.424      0.297\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/79      5.62G     0.0385     0.0296    0.01011         36        640: 1\n",
      "tensor([0.97427], device='cuda:0', grad_fn=<AddBackward0>) tensor(3820.26758, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.639      0.399      0.418      0.289\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/79      5.62G    0.03889    0.02934    0.00991         66        640: 1\n",
      "tensor([1.14350], device='cuda:0', grad_fn=<AddBackward0>) tensor(3711.34131, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.579      0.407       0.42      0.293\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      50/79      5.62G    0.03826    0.02912   0.009953         54        640: 1\n",
      "tensor([1.00931], device='cuda:0', grad_fn=<AddBackward0>) tensor(3389.97168, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.524      0.412      0.409      0.285\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      51/79      5.62G    0.03863    0.02932   0.009722         41        640: 1\n",
      "tensor([0.99945], device='cuda:0', grad_fn=<AddBackward0>) tensor(3297.91919, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.673       0.39      0.422      0.291\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      52/79      5.62G    0.03801    0.02881   0.009829         45        640: 1\n",
      "tensor([0.98336], device='cuda:0', grad_fn=<AddBackward0>) tensor(3614.24048, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.585       0.42      0.413       0.29\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      53/79      5.62G    0.03775    0.02895   0.009599         67        640: 1\n",
      "tensor([1.18788], device='cuda:0', grad_fn=<AddBackward0>) tensor(4086.13110, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.638      0.393      0.426      0.295\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      54/79      5.62G    0.03785    0.02913   0.009876         61        640: 1\n",
      "tensor([1.03220], device='cuda:0', grad_fn=<AddBackward0>) tensor(3857.02661, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.617      0.413      0.422      0.297\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      55/79      5.62G    0.03759     0.0282   0.009584         55        640: 1\n",
      "tensor([1.09696], device='cuda:0', grad_fn=<AddBackward0>) tensor(3496.68921, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.652      0.391      0.433      0.301\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      56/79      5.62G    0.03777    0.02848   0.009471         34        640: 1\n",
      "tensor([1.04086], device='cuda:0', grad_fn=<AddBackward0>) tensor(3668.24292, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.674      0.405      0.438      0.304\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      57/79      5.62G    0.03751    0.02872   0.009286         36        640: 1\n",
      "tensor([0.96027], device='cuda:0', grad_fn=<AddBackward0>) tensor(3583.62598, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.674      0.403      0.438      0.305\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      58/79      5.62G    0.03729    0.02875   0.009413         52        640: 1\n",
      "tensor([1.13587], device='cuda:0', grad_fn=<AddBackward0>) tensor(4413.10791, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.612      0.426      0.429      0.299\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      59/79      5.62G    0.03707    0.02807   0.009098         31        640: 1\n",
      "tensor([0.91026], device='cuda:0', grad_fn=<AddBackward0>) tensor(3950.79248, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.64      0.412      0.436      0.298\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      60/79      5.62G    0.03689    0.02821    0.00933         49        640: 1\n",
      "tensor([0.98154], device='cuda:0', grad_fn=<AddBackward0>) tensor(3211.55859, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.629      0.411      0.425      0.294\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      61/79      5.62G    0.03693     0.0279   0.009316         52        640: 1\n",
      "tensor([1.02700], device='cuda:0', grad_fn=<AddBackward0>) tensor(3056.82764, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.624      0.434      0.437      0.303\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      62/79      5.62G    0.03697    0.02831   0.009211         41        640: 1\n",
      "tensor([1.08060], device='cuda:0', grad_fn=<AddBackward0>) tensor(3768.08716, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.625      0.402      0.423      0.293\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      63/79      5.62G     0.0367    0.02777   0.009285         34        640: 1\n",
      "tensor([0.82692], device='cuda:0', grad_fn=<AddBackward0>) tensor(3247.53369, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.607      0.411      0.423      0.295\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      64/79      5.62G    0.03623    0.02744   0.009234         37        640: 1\n",
      "tensor([0.92128], device='cuda:0', grad_fn=<AddBackward0>) tensor(3371.67310, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.633      0.404      0.428      0.303\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      65/79      5.62G    0.03657    0.02776   0.008845         33        640: 1\n",
      "tensor([0.95182], device='cuda:0', grad_fn=<AddBackward0>) tensor(3478.97852, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.574      0.449      0.427      0.295\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      66/79      5.62G    0.03654    0.02773   0.009091         20        640: 1\n",
      "tensor([0.95786], device='cuda:0', grad_fn=<AddBackward0>) tensor(3312.19604, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.539      0.431      0.422      0.297\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      67/79      5.62G    0.03586    0.02801   0.009097         49        640: 1\n",
      "tensor([0.94446], device='cuda:0', grad_fn=<AddBackward0>) tensor(2925.36035, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.606      0.414      0.433      0.304\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      68/79      5.62G    0.03561    0.02772   0.008787         93        640: 1\n",
      "tensor([1.20014], device='cuda:0', grad_fn=<AddBackward0>) tensor(3491.96826, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.636      0.425      0.431      0.308\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      69/79      5.62G    0.03573    0.02778   0.008645         32        640: 1\n",
      "tensor([0.78555], device='cuda:0', grad_fn=<AddBackward0>) tensor(2906.37891, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.63      0.432      0.438      0.306\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      70/79      5.62G    0.03553    0.02752   0.008943         47        640: 1\n",
      "tensor([1.01241], device='cuda:0', grad_fn=<AddBackward0>) tensor(3453.29956, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.591      0.443      0.439      0.311\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      71/79      5.62G    0.03546    0.02792   0.008545         43        640: 1\n",
      "tensor([0.81809], device='cuda:0', grad_fn=<AddBackward0>) tensor(2862.29272, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.632      0.415      0.433      0.301\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      72/79      5.62G    0.03538    0.02721   0.008543         37        640: 1\n",
      "tensor([0.88276], device='cuda:0', grad_fn=<AddBackward0>) tensor(2981.17065, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.643      0.424      0.437       0.31\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      73/79      5.62G    0.03543    0.02741   0.008663         53        640: 1\n",
      "tensor([0.99548], device='cuda:0', grad_fn=<AddBackward0>) tensor(3310.40088, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.65      0.416      0.437      0.307\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      74/79      5.62G    0.03542    0.02775   0.008759         59        640: 1\n",
      "tensor([1.03932], device='cuda:0', grad_fn=<AddBackward0>) tensor(3319.16943, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.63      0.422      0.438       0.31\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      75/79      5.62G     0.0349    0.02683   0.008818         26        640: 1\n",
      "tensor([0.68256], device='cuda:0', grad_fn=<AddBackward0>) tensor(2846.82886, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.666      0.411      0.434      0.305\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      76/79      5.62G    0.03529    0.02718   0.008412         54        640: 1\n",
      "tensor([0.99065], device='cuda:0', grad_fn=<AddBackward0>) tensor(3317.29468, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.62      0.434      0.432      0.307\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      77/79      5.62G    0.03479    0.02683   0.008514         31        640: 1\n",
      "tensor([0.91240], device='cuda:0', grad_fn=<AddBackward0>) tensor(2995.65063, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.653      0.413      0.433      0.305\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      78/79      5.62G    0.03471    0.02712    0.00831         56        640: 1\n",
      "tensor([0.98839], device='cuda:0', grad_fn=<AddBackward0>) tensor(3181.64380, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.658      0.411      0.438      0.309\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      79/79      5.62G    0.03445    0.02685   0.008484         45        640: 1\n",
      "tensor([0.80022], device='cuda:0', grad_fn=<AddBackward0>) tensor(2739.13794, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.647      0.413      0.435      0.307\n",
      "\n",
      "80 epochs completed in 0.778 hours.\n",
      "Optimizer stripped from runs/train/Lwf1e-4_k_v_2_openimages/weights/last.pt, 14.6MB\n",
      "Optimizer stripped from runs/train/Lwf1e-4_k_v_2_openimages/weights/best.pt, 14.6MB\n",
      "\n",
      "Validating runs/train/Lwf1e-4_k_v_2_openimages/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.591      0.443      0.439      0.311\n",
      "                   car       1200        287       0.64      0.617      0.616      0.417\n",
      "                   van       1200         29      0.717      0.655      0.755      0.593\n",
      "                 truck       1200         29       0.41      0.621       0.55      0.363\n",
      "                person       1200       2264      0.435      0.391      0.326      0.171\n",
      "               bicycle       1200         54      0.672       0.53      0.526      0.336\n",
      "                  bird       1200        136      0.594      0.635      0.582      0.382\n",
      "                  boat       1200        145      0.629      0.462        0.5      0.277\n",
      "                bottle       1200         31     0.0816     0.0323     0.0101    0.00377\n",
      "                   bus       1200         15      0.439      0.733       0.73      0.664\n",
      "                   cat       1200          1          1          0    0.00336    0.00134\n",
      "                 chair       1200         21      0.126       0.19      0.102     0.0385\n",
      "                   dog       1200         42      0.674      0.571      0.643      0.402\n",
      "                 horse       1200         44      0.826      0.727      0.755      0.534\n",
      "                 sheep       1200         10       0.36        0.6      0.496      0.307\n",
      "             billboard       1200          4          1          0     0.0318    0.00954\n",
      "                rabbit       1200         11      0.766      0.545       0.62      0.537\n",
      "                monkey       1200         18      0.682      0.889      0.826      0.624\n",
      "                   pig       1200          6      0.606      0.667      0.699      0.557\n",
      "                   toy       1200         64      0.279      0.188       0.14     0.0838\n",
      "         traffic light       1200         18          1          0     0.0655     0.0323\n",
      "          traffic sign       1200          4      0.483       0.25      0.246      0.197\n",
      "Results saved to \u001b[1mruns/train/Lwf1e-4_k_v_2_openimages\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : Lwf1e-4_k_v_2_openimages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/exp-100epoch/e4a002b62d3442aca700cfbc2213834b\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_f1                    : 0.5924878102299601\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_false_positives       : 14.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5                : 0.5261822962819176\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5:.95            : 0.33566654922974354\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_precision             : 0.6715156859121422\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_recall                : 0.5301022867160433\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_support               : 54\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_true_positives        : 29.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_f1                  : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_false_positives     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_mAP@.5              : 0.03178707951070337\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_mAP@.5:.95          : 0.009536123853211013\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_precision           : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_recall              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_support             : 4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_true_positives      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_f1                       : 0.6138328205156542\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_false_positives          : 59.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_mAP@.5                   : 0.581919393413695\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_mAP@.5:.95               : 0.381910302243959\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_precision                : 0.5940839532651846\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_recall                   : 0.6349398418025869\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_support                  : 136\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_true_positives           : 86.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_f1                       : 0.5327745458246375\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_false_positives          : 40.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_mAP@.5                   : 0.5002707791726485\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_mAP@.5:.95               : 0.2773541836897649\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_precision                : 0.6290281425052314\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_recall                   : 0.46206896551724136\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_support                  : 145\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_true_positives           : 67.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_f1                     : 0.04623912352484075\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_false_positives        : 11.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_mAP@.5                 : 0.01014651816111515\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_mAP@.5:.95             : 0.0037738528677095824\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_precision              : 0.08160990208315311\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_recall                 : 0.03225806451612903\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_support                : 31\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_true_positives         : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_f1                        : 0.5490005487441934\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_false_positives           : 14.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5                    : 0.7304752671445096\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5:.95                : 0.6636223231269811\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_precision                 : 0.4387220957049884\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_recall                    : 0.7333333333333333\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_support                   : 15\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_true_positives            : 11.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_f1                        : 0.6279207846619387\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_false_positives           : 100.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5                    : 0.6157203690987496\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5:.95                : 0.4166625484181344\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_precision                 : 0.6395308537891293\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_recall                    : 0.6167247386759582\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_support                   : 287\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_true_positives            : 177.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_f1                        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_false_positives           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_mAP@.5                    : 0.0033614864864864864\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_mAP@.5:.95                : 0.0013445945945945947\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_precision                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_recall                    : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_support                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_true_positives            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_f1                      : 0.1516583477316043\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_false_positives         : 28.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_mAP@.5                  : 0.10216704622449113\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_mAP@.5:.95              : 0.0385097520206152\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_precision               : 0.12598367224852794\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_recall                  : 0.19047619047619047\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_support                 : 21\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_true_positives          : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_f1                        : 0.6186453564361084\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_false_positives           : 12.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_mAP@.5                    : 0.6434223823166578\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_mAP@.5:.95                : 0.4024688397933566\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_precision                 : 0.6743679585358897\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_recall                    : 0.5714285714285714\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_support                   : 42\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_true_positives            : 24.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_f1                      : 0.7737111575314489\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_false_positives         : 7.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_mAP@.5                  : 0.7552587063917774\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_mAP@.5:.95              : 0.5343845188856158\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_precision               : 0.8264845442233355\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_recall                  : 0.7272727272727273\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_support                 : 44\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_true_positives          : 32.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [2104]                   : (0.9578642845153809, 9.153904914855957)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [160]         : (0.0818489369536816, 0.4393080076675333)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [160]    : (0.04004698654439197, 0.31078896139077267)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [160]       : (0.49261649542879576, 0.9014008331470906)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [160]          : (0.07125861139513864, 0.4486778860181513)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_f1                     : 0.7717540045703564\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_false_positives        : 7.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_mAP@.5                 : 0.8258027395548919\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_mAP@.5:.95             : 0.624003999815764\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_precision              : 0.6818959729261335\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_recall                 : 0.8888888888888888\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_support                : 18\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_true_positives         : 16.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_f1                     : 0.41195800672265825\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_false_positives        : 1148.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5                 : 0.32598163114560247\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5:.95             : 0.1711409246747616\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_precision              : 0.43541269332084076\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_recall                 : 0.3909010600706714\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_support                : 2264\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_true_positives         : 885.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_f1                        : 0.6350567762345453\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_false_positives           : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_mAP@.5                    : 0.6990126960418224\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_mAP@.5:.95                : 0.5572558566094101\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_precision                 : 0.6063087466596238\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_recall                    : 0.6666666666666666\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_support                   : 6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_true_positives            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_f1                     : 0.6371806490285495\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_false_positives        : 2.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_mAP@.5                 : 0.6202971495365716\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_mAP@.5:.95             : 0.536562127707923\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_precision              : 0.7659935970683635\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_recall                 : 0.5454545454545454\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_support                : 11\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_true_positives         : 6.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_f1                      : 0.4501651851632187\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_false_positives         : 11.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_mAP@.5                  : 0.49592180078935044\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_mAP@.5:.95              : 0.30744960802076354\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_precision               : 0.36021148358752114\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_recall                  : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_support                 : 10\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_true_positives          : 6.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_f1                        : 0.2241983580102269\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_false_positives           : 31.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_mAP@.5                    : 0.1397125534311348\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_mAP@.5:.95                : 0.08378668954474515\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_precision                 : 0.2787581857349299\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_recall                    : 0.1875\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_support                   : 64\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_true_positives            : 12.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_f1              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_false_positives : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_mAP@.5          : 0.06552334923158451\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_mAP@.5:.95      : 0.03231743242427948\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_precision       : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_recall          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_support         : 18\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_true_positives  : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_f1               : 0.3295585058362953\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_false_positives  : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_mAP@.5           : 0.24645038167938932\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_mAP@.5:.95       : 0.19716030534351145\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_precision        : 0.48338948718638164\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_recall           : 0.25\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_support          : 4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_true_positives   : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [160]          : (0.03445226326584816, 0.08548931032419205)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [160]          : (0.008310263976454735, 0.05376855656504631)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [160]          : (0.026825256645679474, 0.04224928840994835)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_f1                      : 0.4939777298942569\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_false_positives         : 26.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5                  : 0.5498687656653721\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5:.95              : 0.36317803806059934\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_precision               : 0.4102304234439541\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_recall                  : 0.6206896551724138\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_support                 : 29\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_true_positives          : 18.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [160]            : (0.050556134432554245, 0.0669565200805664)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [160]            : (0.014531247317790985, 0.029936207458376884)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [160]            : (0.023551739752292633, 0.02710653655230999)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_f1                        : 0.6847054283986677\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_false_positives           : 7.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5                    : 0.7546699127169916\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5:.95                : 0.5932834522895979\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_precision                 : 0.7170266272292827\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_recall                    : 0.6551724137931034\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_support                   : 29\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_true_positives            : 19.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [160]                   : (0.00034750000000000026, 0.07011406844106464)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [160]                   : (0.00034750000000000026, 0.009740139416983522)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [160]                   : (0.00034750000000000026, 0.009740139416983522)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : Lwf1e-4_k_v_2_openimages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/exp-100epoch/e4a002b62d3442aca700cfbc2213834b\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.225\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/Lwf1e-4_k_v_2_openimages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (2.36 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     model graph                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Please wait for assets to finish uploading (timeout is 10800 seconds)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m All assets have been sent, waiting for delivery confirmation\n"
     ]
    }
   ],
   "source": [
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_Lwf.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_openimages.yaml \\\n",
    "--data data/openimages.yaml \\\n",
    "--epochs 80 \\\n",
    "--weights ./runs/train/increment_VOC_plain/weights/last.pt \\\n",
    "--name Lwf1e-4_k_v_2_openimages \\\n",
    "--Lwf_enable \\\n",
    "--Lwf_temperature 1.0 \\\n",
    "--Lwf_lambda 1e-4 \\\n",
    "\n",
    "\"\"\"\n",
    "!{command}\n",
    "#43分钟"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "e6552677-86f1-459e-af6f-0fd17bac41fc",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/openimages.yaml, weights=['runs/train/Lwf1e-4_k_v_2_openimages/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/test.cache... 600 ima\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        600       1621      0.559      0.454      0.459      0.314\n",
      "                   car        600        113      0.521      0.496      0.495      0.334\n",
      "                   van        600          6      0.245      0.167      0.105     0.0923\n",
      "                 truck        600         17       0.82      0.529      0.696      0.551\n",
      "                person        600       1131      0.506      0.362      0.346      0.184\n",
      "               bicycle        600         43      0.775        0.4      0.523      0.344\n",
      "                  bird        600         61       0.69      0.557      0.563      0.423\n",
      "                  boat        600         82      0.757        0.5      0.548      0.309\n",
      "                bottle        600          1          0          0     0.0765     0.0377\n",
      "                   bus        600          3      0.368      0.333      0.217      0.185\n",
      "                   cat        600          5          1          0      0.561      0.335\n",
      "                 chair        600         12      0.278      0.333       0.29      0.184\n",
      "                   dog        600         25       0.71        0.8      0.724      0.554\n",
      "                 horse        600         37      0.784      0.757      0.749        0.5\n",
      "                 sheep        600          8      0.568      0.625      0.698      0.535\n",
      "                 train        600          2          1          0    0.00967    0.00416\n",
      "             billboard        600          3          0          0     0.0399     0.0325\n",
      "                rabbit        600          1      0.223          1      0.249      0.199\n",
      "                monkey        600         16      0.717      0.812      0.805      0.484\n",
      "                   pig        600          7      0.625          1      0.904      0.735\n",
      "                   toy        600         42      0.471      0.119      0.178     0.0807\n",
      "         traffic light        600          5      0.936        0.2      0.339      0.211\n",
      "          traffic sign        600          1      0.302          1      0.995      0.597\n",
      "Speed: 0.1ms pre-process, 2.7ms inference, 1.1ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp227\u001b[0m\n",
      "openimages\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_VOC.yaml, weights=['runs/train/Lwf1e-4_k_v_2_openimages/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 image\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.565      0.478      0.523      0.324\n",
      "                   car       4952       1201      0.526      0.872      0.805      0.579\n",
      "                person       4952       4528      0.347      0.684      0.514      0.303\n",
      "             aeroplane       4952        285      0.863      0.133      0.362      0.186\n",
      "               bicycle       4952        337      0.498      0.804      0.726      0.468\n",
      "                  bird       4952        459      0.523      0.704      0.692       0.42\n",
      "                  boat       4952        263      0.264      0.745      0.576      0.318\n",
      "                bottle       4952        469      0.576      0.382      0.435       0.26\n",
      "                   bus       4952        213      0.676      0.783      0.783      0.588\n",
      "                   cat       4952        358      0.792      0.441      0.708      0.449\n",
      "                 chair       4952        756      0.385      0.366      0.335      0.188\n",
      "                   cow       4952        244      0.512      0.108      0.266      0.169\n",
      "           diningtable       4952        206      0.484      0.132      0.252     0.0958\n",
      "                   dog       4952        489      0.625      0.746      0.756      0.483\n",
      "                 horse       4952        348      0.663      0.819      0.826      0.554\n",
      "             motorbike       4952        325      0.814      0.178      0.439      0.213\n",
      "           pottedplant       4952        480      0.346     0.0429     0.0884     0.0407\n",
      "                 sheep       4952        242      0.319      0.662      0.523      0.324\n",
      "                  sofa       4952        239      0.637      0.364      0.464      0.291\n",
      "                 train       4952        282      0.758      0.457      0.611      0.387\n",
      "             tvmonitor       4952        308      0.701       0.14      0.303      0.158\n",
      "Speed: 0.1ms pre-process, 1.4ms inference, 0.7ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp228\u001b[0m\n",
      "Voc\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_kitti.yaml, weights=['runs/train/Lwf1e-4_k_v_2_openimages/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198       0.59      0.185       0.17     0.0852\n",
      "                   car       2244       8711      0.673      0.642      0.669      0.335\n",
      "                   van       2244        861      0.459     0.0825      0.187      0.109\n",
      "                 truck       2244        333       0.21      0.321      0.179      0.104\n",
      "                  tram       2244        138          1          0     0.0172    0.00545\n",
      "                person       2244       1286      0.379      0.438      0.308      0.128\n",
      "        person_sitting       2244         89          1          0          0          0\n",
      "               cyclist       2244        496          1          0          0          0\n",
      "                  misc       2244        284          0          0          0          0\n",
      "Speed: 0.0ms pre-process, 1.0ms inference, 0.8ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp229\u001b[0m\n",
      "kitti\n"
     ]
    }
   ],
   "source": [
    "# openimages\n",
    "model = f'runs/train/Lwf1e-4_k_v_2_openimages/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/openimages.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'openimages' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "\n",
    "# Voc\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/val_VOC.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Voc' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "# kitti\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/val_kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'kitti' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1ae7e714-8439-4626-a7f3-ecc32f574fea",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9110af53-8dcb-4553-9311-2c730ed2fb74",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 之前的方法只在VOC上取得了保持，在kitt上仍然效果较差。尝试使用两个旧模型蒸馏。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "045b0834-70ac-4e2d-a458-389564b562d1",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_LwfPro: \u001b[0mweights=./runs/train/increment_VOC_plain/weights/last.pt, cfg=models/yolov5s_openimages.yaml, data=data/openimages.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=80, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=k_v_2oldmodels_openimages, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=True, Lwf_lambda=0.0001, Lwf_temperature=1.0, Old_models=['./runs/train/increment_VOC_plain/weights/last.pt', './runs/train/fog_02/weights/last.pt']\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2895 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/exp-100epoch/fcf054c3bf6b462aa9ae693566083224\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1    110583  models.yolo.Detect                      [36, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7116727 parameters, 7116727 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/355 items from runs/train/increment_VOC_plain/weights/last.pt\n",
      "Overriding model.yaml nc=36 with nc=26\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83613  models.yolo.Detect                      [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7089757 parameters, 7089757 gradients, 16.2 GFLOPs\n",
      "\n",
      "Overriding model.yaml nc=36 with nc=8\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35067  models.yolo.Detect                      [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7041211 parameters, 7041211 gradients, 16.0 GFLOPs\n",
      "\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 66 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/train.cache... 4200 \u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/val.cache... 1200 imag\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.02 anchors/target, 0.998 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/k_v_2oldmodels_openimages/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/k_v_2oldmodels_openimages\u001b[0m\n",
      "Starting training for 80 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/79      3.68G     0.0854    0.04237    0.05735         40        640: 1\n",
      "tensor([4.46835], device='cuda:0', grad_fn=<AddBackward0>) tensor(12265.26953, device='cuda:0', grad_fn=<AddBackward0>), tensor(21823.72266, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.904     0.0687     0.0798     0.0388\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/79      5.79G    0.06391    0.03808     0.0333         63        640: 1\n",
      "tensor([3.95897], device='cuda:0', grad_fn=<AddBackward0>) tensor(7361.96777, device='cuda:0', grad_fn=<AddBackward0>), tensor(19725.37109, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.634      0.197      0.216      0.121\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/79      5.79G    0.05998    0.03662    0.02886         57        640: 1\n",
      "tensor([3.66618], device='cuda:0', grad_fn=<AddBackward0>) tensor(7560.74072, device='cuda:0', grad_fn=<AddBackward0>), tensor(19448.88086, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.655      0.214      0.244      0.143\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/79      5.79G    0.05615    0.03654    0.02606         42        640: 1\n",
      "tensor([3.40638], device='cuda:0', grad_fn=<AddBackward0>) tensor(7036.13867, device='cuda:0', grad_fn=<AddBackward0>), tensor(18556.83984, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.631      0.258       0.29      0.176\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/79      5.79G    0.05305     0.0354    0.02452         36        640: 1\n",
      "tensor([3.43946], device='cuda:0', grad_fn=<AddBackward0>) tensor(7945.24316, device='cuda:0', grad_fn=<AddBackward0>), tensor(16440.12109, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.512      0.364      0.307      0.191\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/79      5.79G    0.05185    0.03615    0.02285         39        640: 1\n",
      "tensor([3.44555], device='cuda:0', grad_fn=<AddBackward0>) tensor(7789.26514, device='cuda:0', grad_fn=<AddBackward0>), tensor(18037.91602, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.528       0.32      0.317      0.199\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/79      5.79G    0.05093    0.03541    0.02163         68        640: 1\n",
      "tensor([3.73099], device='cuda:0', grad_fn=<AddBackward0>) tensor(9053.06445, device='cuda:0', grad_fn=<AddBackward0>), tensor(16310.58594, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.567      0.338      0.326      0.208\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/79      5.79G    0.05035    0.03512    0.02171         31        640: 1\n",
      "tensor([3.23069], device='cuda:0', grad_fn=<AddBackward0>) tensor(9470.69434, device='cuda:0', grad_fn=<AddBackward0>), tensor(15658.40820, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.476      0.429       0.34       0.22\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/79      5.79G    0.04975    0.03497    0.02077         35        640: 1\n",
      "tensor([3.32673], device='cuda:0', grad_fn=<AddBackward0>) tensor(8515.36523, device='cuda:0', grad_fn=<AddBackward0>), tensor(17424.43164, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.502      0.381      0.316      0.205\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/79      5.79G    0.04901    0.03472    0.02009         42        640: 1\n",
      "tensor([3.45205], device='cuda:0', grad_fn=<AddBackward0>) tensor(9288.48047, device='cuda:0', grad_fn=<AddBackward0>), tensor(16550.33203, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.456      0.381      0.339      0.223\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/79      5.79G    0.04863    0.03415    0.01951         38        640: 1\n",
      "tensor([3.37734], device='cuda:0', grad_fn=<AddBackward0>) tensor(9033.16797, device='cuda:0', grad_fn=<AddBackward0>), tensor(16817.12891, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.544      0.378      0.344       0.22\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/79      5.79G    0.04786    0.03455    0.01883         59        640: 1\n",
      "tensor([3.61669], device='cuda:0', grad_fn=<AddBackward0>) tensor(9347.22949, device='cuda:0', grad_fn=<AddBackward0>), tensor(17360.97852, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.518      0.374      0.344      0.224\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/79      5.79G    0.04738    0.03403    0.01934         46        640: 1\n",
      "tensor([3.42798], device='cuda:0', grad_fn=<AddBackward0>) tensor(9176.32227, device='cuda:0', grad_fn=<AddBackward0>), tensor(16970.35938, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.513       0.38      0.345      0.221\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/79      5.79G    0.04743    0.03364    0.01828         47        640: 1\n",
      "tensor([3.49250], device='cuda:0', grad_fn=<AddBackward0>) tensor(9314.91016, device='cuda:0', grad_fn=<AddBackward0>), tensor(17015.29688, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.527      0.353      0.344       0.23\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/79      5.79G     0.0467    0.03315    0.01822         32        640: 1\n",
      "tensor([3.09256], device='cuda:0', grad_fn=<AddBackward0>) tensor(9277.95508, device='cuda:0', grad_fn=<AddBackward0>), tensor(15761.55957, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.603      0.374      0.378      0.254\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/79      5.79G    0.04627    0.03367    0.01726         48        640: 1\n",
      "tensor([3.31286], device='cuda:0', grad_fn=<AddBackward0>) tensor(8789.23242, device='cuda:0', grad_fn=<AddBackward0>), tensor(15959.57617, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.625      0.378      0.393      0.252\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/79      5.79G    0.04627    0.03289    0.01735         43        640: 1\n",
      "tensor([3.15059], device='cuda:0', grad_fn=<AddBackward0>) tensor(8756.71973, device='cuda:0', grad_fn=<AddBackward0>), tensor(14564.15527, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.557      0.396      0.364      0.232\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/79      5.79G    0.04609    0.03327    0.01712         64        640: 1\n",
      "tensor([3.33059], device='cuda:0', grad_fn=<AddBackward0>) tensor(8746.91602, device='cuda:0', grad_fn=<AddBackward0>), tensor(15976.52246, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.587      0.379      0.369      0.243\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/79      5.79G    0.04569    0.03307    0.01712         61        640: 1\n",
      "tensor([3.22535], device='cuda:0', grad_fn=<AddBackward0>) tensor(8956.14062, device='cuda:0', grad_fn=<AddBackward0>), tensor(15376.30371, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.638      0.393      0.396      0.264\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/79      5.79G    0.04515    0.03326    0.01666         29        640: 1\n",
      "tensor([3.20163], device='cuda:0', grad_fn=<AddBackward0>) tensor(9511.19238, device='cuda:0', grad_fn=<AddBackward0>), tensor(15212.64160, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.604      0.394      0.382      0.245\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/79      5.79G    0.04477    0.03182    0.01598         39        640: 1\n",
      "tensor([3.10533], device='cuda:0', grad_fn=<AddBackward0>) tensor(8487.09375, device='cuda:0', grad_fn=<AddBackward0>), tensor(14537.73438, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.631      0.384      0.387       0.26\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/79      5.79G     0.0441    0.03237    0.01644         40        640: 1\n",
      "tensor([3.33555], device='cuda:0', grad_fn=<AddBackward0>) tensor(10699.82715, device='cuda:0', grad_fn=<AddBackward0>), tensor(15479.19336, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.58        0.4      0.374      0.241\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/79      5.79G    0.04419    0.03203     0.0162         44        640: 1\n",
      "tensor([3.16359], device='cuda:0', grad_fn=<AddBackward0>) tensor(8543.60352, device='cuda:0', grad_fn=<AddBackward0>), tensor(15105.42188, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.632      0.381      0.396      0.253\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/79      5.79G      0.044    0.03232    0.01598         31        640: 1\n",
      "tensor([3.20874], device='cuda:0', grad_fn=<AddBackward0>) tensor(9309.49609, device='cuda:0', grad_fn=<AddBackward0>), tensor(16285.82715, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.592      0.386      0.388      0.249\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/79      5.79G    0.04328    0.03171    0.01546         72        640: 1\n",
      "tensor([3.34494], device='cuda:0', grad_fn=<AddBackward0>) tensor(9762.99609, device='cuda:0', grad_fn=<AddBackward0>), tensor(15854.36328, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.651      0.373      0.401      0.264\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/79      5.79G    0.04338      0.032    0.01568         41        640: 1\n",
      "tensor([3.29485], device='cuda:0', grad_fn=<AddBackward0>) tensor(9871.37891, device='cuda:0', grad_fn=<AddBackward0>), tensor(16162.23047, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.632       0.37      0.398      0.271\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/79      5.79G    0.04362    0.03187    0.01545         33        640: 1\n",
      "tensor([3.35444], device='cuda:0', grad_fn=<AddBackward0>) tensor(9070.02539, device='cuda:0', grad_fn=<AddBackward0>), tensor(16168.84766, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.594      0.416        0.4       0.26\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/79      5.79G    0.04323    0.03134    0.01494         30        640: 1\n",
      "tensor([3.12250], device='cuda:0', grad_fn=<AddBackward0>) tensor(9307.71582, device='cuda:0', grad_fn=<AddBackward0>), tensor(16062.88574, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.638      0.384      0.396      0.262\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/79      5.79G    0.04324    0.03125    0.01499         30        640: 1\n",
      "tensor([3.10610], device='cuda:0', grad_fn=<AddBackward0>) tensor(9460.49316, device='cuda:0', grad_fn=<AddBackward0>), tensor(15351.21484, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.636      0.383      0.413      0.278\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/79      5.79G    0.04248    0.03105    0.01468         43        640: 1\n",
      "tensor([3.17300], device='cuda:0', grad_fn=<AddBackward0>) tensor(9137.70117, device='cuda:0', grad_fn=<AddBackward0>), tensor(15374.94434, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.651      0.372      0.404      0.264\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/79      5.79G    0.04286    0.03085    0.01429         62        640: 1\n",
      "tensor([3.32648], device='cuda:0', grad_fn=<AddBackward0>) tensor(8902.18359, device='cuda:0', grad_fn=<AddBackward0>), tensor(15871.67578, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.639      0.378      0.408      0.275\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/79      5.79G    0.04244    0.03073     0.0144         40        640: 1\n",
      "tensor([3.17314], device='cuda:0', grad_fn=<AddBackward0>) tensor(9066.32715, device='cuda:0', grad_fn=<AddBackward0>), tensor(15999.48145, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.613      0.393      0.397      0.262\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/79      5.79G    0.04216    0.03085    0.01419         37        640: 1\n",
      "tensor([3.18031], device='cuda:0', grad_fn=<AddBackward0>) tensor(9816.54883, device='cuda:0', grad_fn=<AddBackward0>), tensor(15913.79688, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.627      0.404      0.407       0.27\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/79      5.79G    0.04148    0.03027    0.01447         45        640: 1\n",
      "tensor([3.22427], device='cuda:0', grad_fn=<AddBackward0>) tensor(8781.87500, device='cuda:0', grad_fn=<AddBackward0>), tensor(15256.60645, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.482      0.443      0.404      0.266\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/79      5.79G    0.04194    0.03097    0.01403         70        640: 1\n",
      "tensor([3.24864], device='cuda:0', grad_fn=<AddBackward0>) tensor(9006.92090, device='cuda:0', grad_fn=<AddBackward0>), tensor(15910.78906, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.619      0.403      0.423      0.282\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/79      5.79G    0.04156     0.0303    0.01406         34        640: 1\n",
      "tensor([3.02684], device='cuda:0', grad_fn=<AddBackward0>) tensor(9247.70508, device='cuda:0', grad_fn=<AddBackward0>), tensor(14916.79883, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.569      0.412       0.42      0.271\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/79      5.79G    0.04105    0.02997    0.01388         52        640: 1\n",
      "tensor([3.12865], device='cuda:0', grad_fn=<AddBackward0>) tensor(9666.64258, device='cuda:0', grad_fn=<AddBackward0>), tensor(14665.57617, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.599      0.392      0.409      0.273\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/79      5.79G      0.041    0.03016    0.01375         46        640: 1\n",
      "tensor([3.04980], device='cuda:0', grad_fn=<AddBackward0>) tensor(9069.41699, device='cuda:0', grad_fn=<AddBackward0>), tensor(14559.02051, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.551      0.442      0.419      0.277\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/79      5.79G    0.04034    0.02967     0.0136         42        640: 1\n",
      "tensor([3.18458], device='cuda:0', grad_fn=<AddBackward0>) tensor(8996.93066, device='cuda:0', grad_fn=<AddBackward0>), tensor(15495.79102, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.595      0.388      0.418      0.283\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/79      5.79G    0.04077    0.02971    0.01347         56        640: 1\n",
      "tensor([3.06497], device='cuda:0', grad_fn=<AddBackward0>) tensor(8405.88477, device='cuda:0', grad_fn=<AddBackward0>), tensor(14288.53125, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.603      0.414      0.418      0.279\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/79      5.79G    0.04081     0.0298    0.01344         47        640: 1\n",
      "tensor([3.47672], device='cuda:0', grad_fn=<AddBackward0>) tensor(11129.28223, device='cuda:0', grad_fn=<AddBackward0>), tensor(15273.62207, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.556       0.44      0.425      0.283\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/79      5.79G    0.04038    0.02981     0.0132         30        640: 1\n",
      "tensor([2.94716], device='cuda:0', grad_fn=<AddBackward0>) tensor(9410.06836, device='cuda:0', grad_fn=<AddBackward0>), tensor(14594.78906, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.577      0.417      0.436      0.292\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/79      5.79G    0.04013    0.02942     0.0129         31        640: 1\n",
      "tensor([2.88168], device='cuda:0', grad_fn=<AddBackward0>) tensor(9242.27441, device='cuda:0', grad_fn=<AddBackward0>), tensor(14602.26367, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.685      0.406      0.422      0.286\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/79      5.79G    0.04048    0.02998    0.01295         16        640: 1\n",
      "tensor([2.99525], device='cuda:0', grad_fn=<AddBackward0>) tensor(9398.16797, device='cuda:0', grad_fn=<AddBackward0>), tensor(15666.58008, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.577      0.456      0.434      0.289\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/79      5.79G    0.03986    0.02967    0.01293         39        640: 1\n",
      "tensor([3.10413], device='cuda:0', grad_fn=<AddBackward0>) tensor(8619.69238, device='cuda:0', grad_fn=<AddBackward0>), tensor(15970.06250, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.599       0.43      0.428      0.289\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/79      5.79G    0.03948    0.02925    0.01258         45        640: 1\n",
      "tensor([2.93525], device='cuda:0', grad_fn=<AddBackward0>) tensor(8290.16211, device='cuda:0', grad_fn=<AddBackward0>), tensor(15236.57227, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.616      0.406      0.417      0.288\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/79      5.79G    0.03942    0.02964    0.01309         50        640: 1\n",
      "tensor([3.14068], device='cuda:0', grad_fn=<AddBackward0>) tensor(8990.29199, device='cuda:0', grad_fn=<AddBackward0>), tensor(15288.84180, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.622      0.386      0.423      0.291\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/79      5.79G    0.03962    0.02913    0.01245         28        640: 1\n",
      "tensor([3.09523], device='cuda:0', grad_fn=<AddBackward0>) tensor(9314.20703, device='cuda:0', grad_fn=<AddBackward0>), tensor(15764.43945, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.593      0.404       0.42      0.296\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/79      5.79G    0.03863    0.02928    0.01267         36        640: 1\n",
      "tensor([3.02243], device='cuda:0', grad_fn=<AddBackward0>) tensor(8883.77246, device='cuda:0', grad_fn=<AddBackward0>), tensor(15221.30859, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.57      0.432      0.421      0.293\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/79      5.79G    0.03907    0.02889    0.01251         66        640: 1\n",
      "tensor([3.15984], device='cuda:0', grad_fn=<AddBackward0>) tensor(8676.32910, device='cuda:0', grad_fn=<AddBackward0>), tensor(14630.66992, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.499      0.451       0.42      0.286\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      50/79      5.79G    0.03844    0.02867    0.01245         54        640: 1\n",
      "tensor([2.95506], device='cuda:0', grad_fn=<AddBackward0>) tensor(8278.98047, device='cuda:0', grad_fn=<AddBackward0>), tensor(14253.54199, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.632      0.379      0.422      0.295\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      51/79      5.79G    0.03873    0.02892    0.01219         41        640: 1\n",
      "tensor([2.89344], device='cuda:0', grad_fn=<AddBackward0>) tensor(8038.87354, device='cuda:0', grad_fn=<AddBackward0>), tensor(14225.16895, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.616      0.401      0.424      0.292\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      52/79      5.79G    0.03829    0.02841    0.01226         45        640: 1\n",
      "tensor([2.98819], device='cuda:0', grad_fn=<AddBackward0>) tensor(8916.42578, device='cuda:0', grad_fn=<AddBackward0>), tensor(14788.83203, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.647      0.389      0.427      0.295\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      53/79      5.79G    0.03795    0.02858    0.01203         67        640: 1\n",
      "tensor([3.15745], device='cuda:0', grad_fn=<AddBackward0>) tensor(9344.17480, device='cuda:0', grad_fn=<AddBackward0>), tensor(14383.34863, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.56      0.399      0.421      0.293\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      54/79      5.79G    0.03803     0.0287    0.01235         61        640: 1\n",
      "tensor([3.02050], device='cuda:0', grad_fn=<AddBackward0>) tensor(8777.11230, device='cuda:0', grad_fn=<AddBackward0>), tensor(14806.33008, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.613      0.416       0.43      0.295\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      55/79      5.79G    0.03789    0.02783    0.01197         55        640: 1\n",
      "tensor([3.10328], device='cuda:0', grad_fn=<AddBackward0>) tensor(8983.23242, device='cuda:0', grad_fn=<AddBackward0>), tensor(14621.68262, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.617      0.401      0.437      0.299\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      56/79      5.79G    0.03801    0.02808    0.01187         34        640: 1\n",
      "tensor([2.92320], device='cuda:0', grad_fn=<AddBackward0>) tensor(8630.21191, device='cuda:0', grad_fn=<AddBackward0>), tensor(14064.65234, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.563      0.439      0.433        0.3\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      57/79      5.79G    0.03765    0.02833    0.01168         36        640: 1\n",
      "tensor([3.12668], device='cuda:0', grad_fn=<AddBackward0>) tensor(8930.50586, device='cuda:0', grad_fn=<AddBackward0>), tensor(15747.69238, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.594      0.425      0.435      0.298\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      58/79      5.79G    0.03743    0.02836    0.01178         52        640: 1\n",
      "tensor([3.17091], device='cuda:0', grad_fn=<AddBackward0>) tensor(9731.87109, device='cuda:0', grad_fn=<AddBackward0>), tensor(14792.46289, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.571       0.45      0.444      0.304\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      59/79      5.79G    0.03728    0.02768     0.0115         31        640: 1\n",
      "tensor([3.04994], device='cuda:0', grad_fn=<AddBackward0>) tensor(9232.63281, device='cuda:0', grad_fn=<AddBackward0>), tensor(16083.89551, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.641      0.399      0.429      0.293\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      60/79      5.79G    0.03711    0.02781    0.01168         49        640: 1\n",
      "tensor([2.80759], device='cuda:0', grad_fn=<AddBackward0>) tensor(7980.53418, device='cuda:0', grad_fn=<AddBackward0>), tensor(13464.80176, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.639      0.396      0.428      0.294\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      61/79      5.79G     0.0371     0.0275    0.01164         52        640: 1\n",
      "tensor([2.91126], device='cuda:0', grad_fn=<AddBackward0>) tensor(7956.01758, device='cuda:0', grad_fn=<AddBackward0>), tensor(13683.02832, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.65      0.394      0.428      0.298\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      62/79      5.79G    0.03717    0.02795    0.01157         41        640: 1\n",
      "tensor([2.94048], device='cuda:0', grad_fn=<AddBackward0>) tensor(8827.17871, device='cuda:0', grad_fn=<AddBackward0>), tensor(13358.04102, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.634      0.417      0.427      0.291\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      63/79      5.79G    0.03685    0.02729    0.01152         34        640: 1\n",
      "tensor([2.81153], device='cuda:0', grad_fn=<AddBackward0>) tensor(8390.98633, device='cuda:0', grad_fn=<AddBackward0>), tensor(14604.51367, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.641      0.399      0.425      0.297\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      64/79      5.79G    0.03647    0.02704    0.01156         37        640: 1\n",
      "tensor([2.87142], device='cuda:0', grad_fn=<AddBackward0>) tensor(8696.24512, device='cuda:0', grad_fn=<AddBackward0>), tensor(13899.46484, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.605      0.419      0.429      0.293\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      65/79      5.79G     0.0367    0.02735    0.01115         33        640: 1\n",
      "tensor([2.94154], device='cuda:0', grad_fn=<AddBackward0>) tensor(8482.45801, device='cuda:0', grad_fn=<AddBackward0>), tensor(14291.77637, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.659      0.391       0.43      0.295\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      66/79      5.79G    0.03662    0.02729    0.01146         20        640: 1\n",
      "tensor([3.02873], device='cuda:0', grad_fn=<AddBackward0>) tensor(8813.26953, device='cuda:0', grad_fn=<AddBackward0>), tensor(14855.51953, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.666      0.394      0.428        0.3\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      67/79      5.79G    0.03603    0.02755    0.01144         49        640: 1\n",
      "tensor([2.94756], device='cuda:0', grad_fn=<AddBackward0>) tensor(7945.24805, device='cuda:0', grad_fn=<AddBackward0>), tensor(14734.91895, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.651      0.397      0.424      0.296\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      68/79      5.79G    0.03572    0.02717    0.01116         93        640: 1\n",
      "tensor([3.07805], device='cuda:0', grad_fn=<AddBackward0>) tensor(8532.30469, device='cuda:0', grad_fn=<AddBackward0>), tensor(13592.44141, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.645      0.409      0.432      0.305\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      69/79      5.79G    0.03599    0.02727    0.01099         32        640: 1\n",
      "tensor([2.77226], device='cuda:0', grad_fn=<AddBackward0>) tensor(7971.93750, device='cuda:0', grad_fn=<AddBackward0>), tensor(14474.16113, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.634      0.409      0.432      0.303\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      70/79      5.79G    0.03569    0.02703    0.01121         47        640: 1\n",
      "tensor([2.96018], device='cuda:0', grad_fn=<AddBackward0>) tensor(8360.05664, device='cuda:0', grad_fn=<AddBackward0>), tensor(14433.39648, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.644      0.389      0.434      0.304\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      71/79      5.79G    0.03569    0.02748    0.01079         43        640: 1\n",
      "tensor([2.82738], device='cuda:0', grad_fn=<AddBackward0>) tensor(7830.73242, device='cuda:0', grad_fn=<AddBackward0>), tensor(15244.53613, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.629      0.413       0.43      0.301\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      72/79      5.79G    0.03562    0.02667    0.01072         37        640: 1\n",
      "tensor([2.83224], device='cuda:0', grad_fn=<AddBackward0>) tensor(8396.96973, device='cuda:0', grad_fn=<AddBackward0>), tensor(14129.54297, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.63      0.405      0.437      0.306\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      73/79      5.79G    0.03552    0.02686    0.01087         53        640: 1\n",
      "tensor([2.85150], device='cuda:0', grad_fn=<AddBackward0>) tensor(8073.64160, device='cuda:0', grad_fn=<AddBackward0>), tensor(13840.07520, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.627        0.4      0.433      0.305\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      74/79      5.79G    0.03552    0.02713    0.01098         59        640: 1\n",
      "tensor([2.98254], device='cuda:0', grad_fn=<AddBackward0>) tensor(8457.56250, device='cuda:0', grad_fn=<AddBackward0>), tensor(14277.32227, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.616      0.409       0.43        0.3\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      75/79      5.79G    0.03496    0.02627    0.01101         26        640: 1\n",
      "tensor([2.69850], device='cuda:0', grad_fn=<AddBackward0>) tensor(7856.02734, device='cuda:0', grad_fn=<AddBackward0>), tensor(15137.99121, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.624      0.406      0.434      0.305\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      76/79      5.79G    0.03536    0.02661    0.01065         54        640: 1\n",
      "tensor([2.90506], device='cuda:0', grad_fn=<AddBackward0>) tensor(8443.50488, device='cuda:0', grad_fn=<AddBackward0>), tensor(13913.54004, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.621      0.406      0.437      0.309\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      77/79      5.79G    0.03486    0.02627    0.01072         31        640: 1\n",
      "tensor([2.93516], device='cuda:0', grad_fn=<AddBackward0>) tensor(8128.71191, device='cuda:0', grad_fn=<AddBackward0>), tensor(15008.17480, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.619      0.404      0.431      0.305\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      78/79      5.79G     0.0349    0.02653    0.01039         56        640: 1\n",
      "tensor([3.01630], device='cuda:0', grad_fn=<AddBackward0>) tensor(8364.14746, device='cuda:0', grad_fn=<AddBackward0>), tensor(14673.10449, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.633      0.403      0.434       0.31\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      79/79      5.79G     0.0346    0.02625    0.01067         45        640: 1\n",
      "tensor([2.72309], device='cuda:0', grad_fn=<AddBackward0>) tensor(8130.05176, device='cuda:0', grad_fn=<AddBackward0>), tensor(13639.83594, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.679      0.393       0.43      0.305\n",
      "\n",
      "80 epochs completed in 0.928 hours.\n",
      "Optimizer stripped from runs/train/k_v_2oldmodels_openimages/weights/last.pt, 14.6MB\n",
      "Optimizer stripped from runs/train/k_v_2oldmodels_openimages/weights/best.pt, 14.6MB\n",
      "\n",
      "Validating runs/train/k_v_2oldmodels_openimages/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.634      0.403      0.434       0.31\n",
      "                   car       1200        287      0.703      0.595      0.591      0.402\n",
      "                   van       1200         29      0.787      0.509      0.645       0.49\n",
      "                 truck       1200         29      0.518      0.483      0.466      0.326\n",
      "                person       1200       2264       0.49      0.341      0.319      0.164\n",
      "               bicycle       1200         54      0.635        0.5      0.538      0.344\n",
      "                  bird       1200        136        0.6      0.603      0.526      0.341\n",
      "                  boat       1200        145      0.636      0.471      0.491      0.277\n",
      "                bottle       1200         31          0          0    0.00522    0.00248\n",
      "                   bus       1200         15      0.677      0.667      0.771      0.716\n",
      "                   cat       1200          1          1          0    0.00357    0.00178\n",
      "                 chair       1200         21     0.0987      0.143     0.0851     0.0397\n",
      "                   dog       1200         42      0.782      0.571      0.668      0.447\n",
      "                 horse       1200         44      0.881      0.673       0.78      0.562\n",
      "                 sheep       1200         10      0.371        0.4      0.462       0.27\n",
      "             billboard       1200          4          1          0     0.0828     0.0166\n",
      "                rabbit       1200         11      0.645      0.545      0.669      0.553\n",
      "                monkey       1200         18      0.676      0.944      0.812      0.649\n",
      "                   pig       1200          6       0.83      0.667      0.732      0.607\n",
      "                   toy       1200         64      0.357      0.104      0.154     0.0788\n",
      "         traffic light       1200         18          1          0     0.0621     0.0319\n",
      "          traffic sign       1200          4      0.625       0.25      0.246      0.197\n",
      "Results saved to \u001b[1mruns/train/k_v_2oldmodels_openimages\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : k_v_2oldmodels_openimages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/exp-100epoch/fcf054c3bf6b462aa9ae693566083224\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_f1                    : 0.5595122526204986\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_false_positives       : 16.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5                : 0.5381570756171639\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5:.95            : 0.34391132116464096\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_precision             : 0.635105353042263\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_recall                : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_support               : 54\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_true_positives        : 27.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_f1                  : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_false_positives     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_mAP@.5              : 0.08284313725490194\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_mAP@.5:.95          : 0.016568627450980387\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_precision           : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_recall              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_support             : 4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_true_positives      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_f1                       : 0.6012296790793\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_false_positives          : 55.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_mAP@.5                   : 0.5256144911773764\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_mAP@.5:.95               : 0.34097848694014854\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_precision                : 0.5995278706332429\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_recall                   : 0.6029411764705882\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_support                  : 136\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_true_positives           : 82.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_f1                       : 0.5413024829947908\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_false_positives          : 39.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_mAP@.5                   : 0.4914841485700547\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_mAP@.5:.95               : 0.2768909904913282\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_precision                : 0.6364598369523855\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_recall                   : 0.47089829811171846\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_support                  : 145\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_true_positives           : 68.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_f1                     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_false_positives        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_mAP@.5                 : 0.005220514601106788\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_mAP@.5:.95             : 0.002480149004454689\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_precision              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_recall                 : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_support                : 31\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_true_positives         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_f1                        : 0.671844710287538\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_false_positives           : 5.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5                    : 0.7713678724363309\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5:.95                : 0.716067473636073\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_precision                 : 0.6771038199609628\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_recall                    : 0.6666666666666666\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_support                   : 15\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_true_positives            : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_f1                        : 0.644744443345815\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_false_positives           : 72.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5                    : 0.5910633237369111\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5:.95                : 0.40186605811173026\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_precision                 : 0.7034459954570148\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_recall                    : 0.595085424353717\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_support                   : 287\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_true_positives            : 171.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_f1                        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_false_positives           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_mAP@.5                    : 0.0035663082437276\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_mAP@.5:.95                : 0.0017831541218638\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_precision                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_recall                    : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_support                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_true_positives            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_f1                      : 0.11675037946125741\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_false_positives         : 27.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_mAP@.5                  : 0.08506233724975722\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_mAP@.5:.95              : 0.03970188893139969\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_precision               : 0.09871117451762614\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_recall                  : 0.14285714285714285\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_support                 : 21\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_true_positives          : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_f1                        : 0.6603217678687081\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_false_positives           : 7.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_mAP@.5                    : 0.66800636885238\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_mAP@.5:.95                : 0.44692282996205507\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_precision                 : 0.781966968754256\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_recall                    : 0.5714285714285714\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_support                   : 42\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_true_positives            : 24.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_f1                      : 0.7629864573868468\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_false_positives         : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_mAP@.5                  : 0.7798544553183759\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_mAP@.5:.95              : 0.5622304062772653\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_precision               : 0.8809513720924068\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_recall                  : 0.6728831789072752\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_support                 : 44\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_true_positives          : 30.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [2104]                   : (2.905057430267334, 12.375486373901367)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [160]         : (0.07976901363675562, 0.44424204990696864)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [160]    : (0.038765192749520665, 0.31032096226922384)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [160]       : (0.4560422466782233, 0.9036782456738396)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [160]          : (0.0686672685936839, 0.4562628106510785)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_f1                     : 0.787696287848967\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_false_positives        : 8.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_mAP@.5                 : 0.8117701103782474\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_mAP@.5:.95             : 0.649023242711382\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_precision              : 0.6755724496023003\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_recall                 : 0.9444444444444444\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_support                : 18\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_true_positives         : 17.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_f1                     : 0.40238492040454316\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_false_positives        : 805.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5                 : 0.3193577988870479\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5:.95             : 0.16407184560155355\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_precision              : 0.4898319352180871\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_recall                 : 0.3414310954063604\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_support                : 2264\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_true_positives         : 773.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_f1                        : 0.7395336387026243\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_false_positives           : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_mAP@.5                    : 0.7316965124907246\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_mAP@.5:.95                : 0.6069857620578778\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_precision                 : 0.8302840676069497\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_recall                    : 0.6666666666666666\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_support                   : 6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_true_positives            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_f1                     : 0.5911520597272321\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_false_positives        : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_mAP@.5                 : 0.6686554464285717\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_mAP@.5:.95             : 0.553232723214286\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_precision              : 0.6452066862781148\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_recall                 : 0.5454545454545454\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_support                : 11\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_true_positives         : 6.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_f1                      : 0.3851819102025619\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_false_positives         : 7.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_mAP@.5                  : 0.4622522160004257\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_mAP@.5:.95              : 0.2704879049114194\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_precision               : 0.3714224810115221\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_recall                  : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_support                 : 10\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_true_positives          : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_f1                        : 0.16128787636376102\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_false_positives           : 13.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_mAP@.5                    : 0.15393890231080914\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_mAP@.5:.95                : 0.07875885234341777\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_precision                 : 0.3567883673146832\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_recall                    : 0.10419481981981989\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_support                   : 64\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_true_positives            : 7.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_f1              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_false_positives : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_mAP@.5          : 0.062065625\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_mAP@.5:.95      : 0.031872630616830065\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_precision       : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_recall          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_support         : 18\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_true_positives  : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_f1               : 0.3571238668343989\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_false_positives  : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_mAP@.5           : 0.24643396226415096\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_mAP@.5:.95       : 0.19714716981132077\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_precision        : 0.6248836998207274\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_recall           : 0.25\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_support          : 4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_true_positives   : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [160]          : (0.03460298478603363, 0.08540180325508118)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [160]          : (0.01038638036698103, 0.05735454335808754)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [160]          : (0.0262515377253294, 0.04237043485045433)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_f1                      : 0.4997348359456104\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_false_positives         : 13.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5                  : 0.46590259265388556\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5:.95              : 0.32581733248820716\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_precision               : 0.517948504059615\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_recall                  : 0.4827586206896552\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_support                 : 29\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_true_positives          : 14.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [160]            : (0.05115393176674843, 0.06680978089570999)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [160]            : (0.01888212002813816, 0.03445659205317497)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [160]            : (0.023570312187075615, 0.027471667155623436)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_f1                        : 0.6179591771365428\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_false_positives           : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5                    : 0.6445282884395391\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5:.95                : 0.49048616471856105\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_precision                 : 0.7866279234700287\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_recall                    : 0.5088512916099124\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_support                   : 29\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_true_positives            : 15.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [160]                   : (0.00034750000000000026, 0.07011406844106464)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [160]                   : (0.00034750000000000026, 0.009740139416983522)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [160]                   : (0.00034750000000000026, 0.009740139416983522)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : k_v_2oldmodels_openimages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/exp-100epoch/fcf054c3bf6b462aa9ae693566083224\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_enable          : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_lambda          : 0.0001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_temperature     : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Old_models          : ['./runs/train/increment_VOC_plain/weights/last.pt', './runs/train/fog_02/weights/last.pt']\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bbox_interval       : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cfg                 : models/yolov5s_openimages.yaml\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.225\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     data                : data/openimages.yaml\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     epochs              : 80\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                : k_v_2oldmodels_openimages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/k_v_2oldmodels_openimages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weights             : ./runs/train/increment_VOC_plain/weights/last.pt\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (2.38 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     model graph                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n"
     ]
    }
   ],
   "source": [
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_LwfPro.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_openimages.yaml \\\n",
    "--data data/openimages.yaml \\\n",
    "--epochs 80 \\\n",
    "--weights ./runs/train/increment_VOC_plain/weights/last.pt \\\n",
    "--Lwf_enable \\\n",
    "--Lwf_temperature 1.0 \\\n",
    "--Lwf_lambda 1e-4 \\\n",
    "--Old_models \\\n",
    "        ./runs/train/increment_VOC_plain/weights/last.pt \\\n",
    "        ./runs/train/fog_02/weights/last.pt \\\n",
    "--name k_v_2oldmodels_openimages \\\n",
    "\n",
    "\"\"\"\n",
    "!{command}\n",
    "#43分钟"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "247f1b0d-13e0-49af-97e6-24c5f0c282c0",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/openimages.yaml, weights=['runs/train/k_v_2oldmodels_openimages/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/test.cache... 600 ima\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        600       1621      0.632      0.445       0.45      0.312\n",
      "                   car        600        113      0.509       0.46      0.485      0.322\n",
      "                   van        600          6      0.435      0.167      0.094     0.0839\n",
      "                 truck        600         17      0.871      0.399      0.635      0.534\n",
      "                person        600       1131      0.533      0.312      0.345      0.182\n",
      "               bicycle        600         43      0.754      0.395      0.543      0.336\n",
      "                  bird        600         61      0.692      0.492      0.547      0.398\n",
      "                  boat        600         82      0.777      0.451      0.543      0.293\n",
      "                bottle        600          1          1          0          0          0\n",
      "                   bus        600          3      0.653      0.333      0.355      0.316\n",
      "                   cat        600          5          1          0      0.693      0.378\n",
      "                 chair        600         12      0.338      0.333      0.287      0.139\n",
      "                   dog        600         25      0.736       0.84      0.722      0.507\n",
      "                 horse        600         37      0.811      0.757      0.764      0.501\n",
      "                 sheep        600          8      0.744      0.625      0.708      0.546\n",
      "                 train        600          2          1          0     0.0171    0.00433\n",
      "             billboard        600          3          0          0     0.0592     0.0533\n",
      "                rabbit        600          1      0.222          1      0.497      0.448\n",
      "                monkey        600         16      0.693      0.875      0.746      0.498\n",
      "                   pig        600          7       0.51          1       0.91      0.715\n",
      "                   toy        600         42      0.456      0.143      0.223      0.128\n",
      "         traffic light        600          5       0.88        0.2      0.235      0.143\n",
      "          traffic sign        600          1      0.296          1      0.497      0.348\n",
      "Speed: 0.1ms pre-process, 3.3ms inference, 1.3ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp230\u001b[0m\n",
      "openimages\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_VOC.yaml, weights=['runs/train/k_v_2oldmodels_openimages/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 image\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032       0.57      0.455      0.508      0.311\n",
      "                   car       4952       1201      0.541      0.862      0.802      0.574\n",
      "                person       4952       4528      0.379      0.644      0.502      0.292\n",
      "             aeroplane       4952        285      0.887      0.138      0.323      0.156\n",
      "               bicycle       4952        337      0.502      0.804      0.722      0.463\n",
      "                  bird       4952        459      0.558      0.715      0.676        0.4\n",
      "                  boat       4952        263      0.244      0.696      0.569      0.296\n",
      "                bottle       4952        469      0.569      0.324      0.378      0.226\n",
      "                   bus       4952        213      0.658      0.756      0.765      0.575\n",
      "                   cat       4952        358      0.786      0.288      0.658      0.413\n",
      "                 chair       4952        756      0.418      0.381      0.352      0.196\n",
      "                   cow       4952        244      0.508      0.072      0.226      0.146\n",
      "           diningtable       4952        206      0.443      0.107      0.212     0.0785\n",
      "                   dog       4952        489      0.625      0.727      0.732      0.463\n",
      "                 horse       4952        348      0.657      0.805      0.809      0.521\n",
      "             motorbike       4952        325      0.814      0.202      0.475      0.226\n",
      "           pottedplant       4952        480      0.362     0.0402     0.0595     0.0251\n",
      "                 sheep       4952        242      0.334       0.64      0.501      0.307\n",
      "                  sofa       4952        239      0.679      0.364      0.484      0.316\n",
      "                 train       4952        282      0.811      0.427      0.633      0.395\n",
      "             tvmonitor       4952        308      0.632      0.104      0.273      0.144\n",
      "Speed: 0.1ms pre-process, 1.5ms inference, 0.8ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp231\u001b[0m\n",
      "Voc\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_kitti.yaml, weights=['runs/train/k_v_2oldmodels_openimages/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.752       0.18      0.206     0.0996\n",
      "                   car       2244       8711      0.718      0.631      0.675      0.337\n",
      "                   van       2244        861      0.625     0.0988      0.251      0.139\n",
      "                 truck       2244        333      0.253      0.288      0.227      0.126\n",
      "                  tram       2244        138          1          0      0.037     0.0138\n",
      "                person       2244       1286      0.423       0.42      0.337      0.148\n",
      "        person_sitting       2244         89          1          0     0.0469     0.0119\n",
      "               cyclist       2244        496          1          0     0.0498     0.0113\n",
      "                  misc       2244        284          1          0     0.0267     0.0097\n",
      "Speed: 0.0ms pre-process, 1.2ms inference, 0.9ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp232\u001b[0m\n",
      "kitti\n"
     ]
    }
   ],
   "source": [
    "# openimages\n",
    "model = f'runs/train/k_v_2oldmodels_openimages/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/openimages.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'openimages' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "\n",
    "# Voc\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/val_VOC.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Voc' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "# kitti\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/val_kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'kitti' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9045b2ce-d912-46f7-ab8f-8011df18407a",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3a524b09-0d99-402f-af41-95de6834aba9",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "065271d9-8c98-4e72-9cb2-797122efc461",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_LwfPro: \u001b[0mweights=./runs/train/increment_VOC_plain/weights/last.pt, cfg=models/yolov5s_openimages.yaml, data=data/openimages.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=80, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=k_v_kittimodels_openimages, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=True, Lwf_lambda=0.0001, Lwf_temperature=1.0, Old_models=['./runs/train/fog_02/weights/last.pt']\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2895 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/exp-100epoch/1732dda0c4314ee5b549de224733a205\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1    110583  models.yolo.Detect                      [36, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7116727 parameters, 7116727 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/355 items from runs/train/increment_VOC_plain/weights/last.pt\n",
      "Overriding model.yaml nc=36 with nc=8\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35067  models.yolo.Detect                      [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7041211 parameters, 7041211 gradients, 16.0 GFLOPs\n",
      "\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 66 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/train.cache... 4200 \u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/val.cache... 1200 imag\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.02 anchors/target, 0.998 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/k_v_kittimodels_openimages/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/k_v_kittimodels_openimages\u001b[0m\n",
      "Starting training for 80 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/79      3.51G    0.08482    0.04267    0.06392         40        640: 1\n",
      "tensor([3.03781], device='cuda:0', grad_fn=<AddBackward0>) tensor(19149.40820, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.845     0.0278     0.0462     0.0231\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/79      5.62G    0.06187    0.03833    0.03818         63        640: 1\n",
      "tensor([2.94916], device='cuda:0', grad_fn=<AddBackward0>) tensor(16767.29102, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.718      0.151      0.152     0.0878\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/79      5.62G    0.05808    0.03671    0.03158         57        640: 1\n",
      "tensor([2.55724], device='cuda:0', grad_fn=<AddBackward0>) tensor(16304.03906, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.669      0.222      0.213      0.123\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/79      5.62G    0.05507    0.03684    0.02737         42        640: 1\n",
      "tensor([2.28952], device='cuda:0', grad_fn=<AddBackward0>) tensor(14393.17871, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.638      0.271      0.254      0.149\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/79      5.62G    0.05222    0.03567    0.02455         36        640: 1\n",
      "tensor([2.25998], device='cuda:0', grad_fn=<AddBackward0>) tensor(13098.95312, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.571      0.291      0.284      0.169\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/79      5.62G    0.05154    0.03637    0.02281         39        640: 1\n",
      "tensor([2.26006], device='cuda:0', grad_fn=<AddBackward0>) tensor(13934.70508, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.633      0.259      0.278      0.161\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/79      5.62G    0.05048    0.03552    0.02122         68        640: 1\n",
      "tensor([2.50478], device='cuda:0', grad_fn=<AddBackward0>) tensor(12946.87598, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.605      0.276      0.273      0.169\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/79      5.62G    0.05006    0.03535    0.02126         31        640: 1\n",
      "tensor([1.87299], device='cuda:0', grad_fn=<AddBackward0>) tensor(11704.27344, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.515      0.342      0.295      0.185\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/79      5.62G    0.04938    0.03512    0.02052         35        640: 1\n",
      "tensor([2.11993], device='cuda:0', grad_fn=<AddBackward0>) tensor(13477.59863, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.517      0.313      0.272      0.167\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/79      5.62G    0.04857    0.03485    0.01932         42        640: 1\n",
      "tensor([2.13440], device='cuda:0', grad_fn=<AddBackward0>) tensor(12427.99805, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.552      0.352      0.303      0.189\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/79      5.62G    0.04827    0.03421    0.01885         38        640: 1\n",
      "tensor([2.09602], device='cuda:0', grad_fn=<AddBackward0>) tensor(12719.68848, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.497      0.351      0.293      0.184\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/79      5.62G    0.04767    0.03459    0.01798         59        640: 1\n",
      "tensor([2.24491], device='cuda:0', grad_fn=<AddBackward0>) tensor(13078.49414, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.554      0.324      0.295      0.176\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/79      5.62G    0.04721     0.0341    0.01841         46        640: 1\n",
      "tensor([2.15217], device='cuda:0', grad_fn=<AddBackward0>) tensor(13206.22363, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.622      0.331      0.317      0.192\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/79      5.62G    0.04704     0.0336     0.0173         47        640: 1\n",
      "tensor([2.13416], device='cuda:0', grad_fn=<AddBackward0>) tensor(13135.82422, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.628      0.325      0.326      0.206\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/79      5.62G     0.0464    0.03315    0.01709         32        640: 1\n",
      "tensor([1.80502], device='cuda:0', grad_fn=<AddBackward0>) tensor(11866.50879, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.597       0.36      0.332      0.208\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/79      5.62G    0.04606     0.0335    0.01617         48        640: 1\n",
      "tensor([1.99835], device='cuda:0', grad_fn=<AddBackward0>) tensor(11941.58496, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.59      0.343      0.342      0.223\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/79      5.62G    0.04575    0.03281     0.0162         43        640: 1\n",
      "tensor([1.86859], device='cuda:0', grad_fn=<AddBackward0>) tensor(10825.84180, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.554      0.371      0.327      0.199\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/79      5.62G    0.04598    0.03313    0.01589         64        640: 1\n",
      "tensor([2.02297], device='cuda:0', grad_fn=<AddBackward0>) tensor(12104.37500, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.566      0.366      0.336      0.206\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/79      5.62G    0.04532    0.03289    0.01611         61        640: 1\n",
      "tensor([2.00374], device='cuda:0', grad_fn=<AddBackward0>) tensor(11666.90332, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.661      0.363      0.357       0.23\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/79      5.62G     0.0449    0.03314     0.0153         29        640: 1\n",
      "tensor([1.92233], device='cuda:0', grad_fn=<AddBackward0>) tensor(11474.73633, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.611      0.368      0.347      0.225\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/79      5.62G    0.04441    0.03174    0.01481         39        640: 1\n",
      "tensor([1.93484], device='cuda:0', grad_fn=<AddBackward0>) tensor(10932.53418, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.596      0.348       0.33       0.21\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/79      5.62G    0.04377    0.03215    0.01515         40        640: 1\n",
      "tensor([1.85239], device='cuda:0', grad_fn=<AddBackward0>) tensor(11596.44141, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.532      0.368      0.333      0.211\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/79      5.62G    0.04372    0.03193     0.0149         44        640: 1\n",
      "tensor([1.95729], device='cuda:0', grad_fn=<AddBackward0>) tensor(11557.26465, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.571      0.357      0.342      0.212\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/79      5.62G    0.04352    0.03201    0.01445         31        640: 1\n",
      "tensor([1.83936], device='cuda:0', grad_fn=<AddBackward0>) tensor(11962.02344, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.56      0.389      0.347      0.223\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/79      5.62G    0.04312    0.03143    0.01429         72        640: 1\n",
      "tensor([1.90368], device='cuda:0', grad_fn=<AddBackward0>) tensor(11569.11133, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.589      0.359      0.359      0.225\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/79      5.62G    0.04261    0.03178    0.01433         41        640: 1\n",
      "tensor([1.81437], device='cuda:0', grad_fn=<AddBackward0>) tensor(11624.93555, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.46      0.378      0.363      0.237\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/79      5.62G    0.04293    0.03145    0.01372         33        640: 1\n",
      "tensor([1.99641], device='cuda:0', grad_fn=<AddBackward0>) tensor(11626.25879, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.659       0.36       0.37      0.226\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/79      5.62G    0.04276    0.03096    0.01349         30        640: 1\n",
      "tensor([1.75501], device='cuda:0', grad_fn=<AddBackward0>) tensor(11807.45703, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.634      0.362      0.375      0.235\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/79      5.62G    0.04261    0.03083    0.01326         30        640: 1\n",
      "tensor([1.68915], device='cuda:0', grad_fn=<AddBackward0>) tensor(10904.13672, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.648       0.39      0.376      0.248\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/79      5.62G    0.04199    0.03061    0.01297         43        640: 1\n",
      "tensor([1.83010], device='cuda:0', grad_fn=<AddBackward0>) tensor(11418.24707, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.506      0.401      0.378      0.234\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/79      5.62G    0.04211    0.03041    0.01297         62        640: 1\n",
      "tensor([1.99679], device='cuda:0', grad_fn=<AddBackward0>) tensor(11651.87500, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.556      0.376      0.358      0.227\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/79      5.62G     0.0419    0.03023    0.01279         40        640: 1\n",
      "tensor([1.81248], device='cuda:0', grad_fn=<AddBackward0>) tensor(12028.38086, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.588      0.387      0.368      0.234\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/79      5.62G    0.04127    0.03002    0.01234         37        640: 1\n",
      "tensor([1.75431], device='cuda:0', grad_fn=<AddBackward0>) tensor(11791.01465, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.663      0.362      0.382      0.244\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/79      5.62G    0.04061     0.0296    0.01281         45        640: 1\n",
      "tensor([1.92835], device='cuda:0', grad_fn=<AddBackward0>) tensor(11265.05078, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.65      0.346      0.371      0.239\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/79      5.62G    0.04118    0.03023    0.01225         70        640: 1\n",
      "tensor([1.91171], device='cuda:0', grad_fn=<AddBackward0>) tensor(11790.47070, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.586      0.373      0.374      0.232\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/79      5.62G    0.04081     0.0296    0.01232         34        640: 1\n",
      "tensor([1.65669], device='cuda:0', grad_fn=<AddBackward0>) tensor(10725.61426, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.465      0.381       0.38      0.227\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/79      5.62G    0.04011    0.02924    0.01216         52        640: 1\n",
      "tensor([1.70394], device='cuda:0', grad_fn=<AddBackward0>) tensor(10467.58984, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.565      0.406      0.388      0.248\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/79      5.62G    0.04013    0.02937    0.01187         46        640: 1\n",
      "tensor([1.67168], device='cuda:0', grad_fn=<AddBackward0>) tensor(10257.61230, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.45      0.438      0.378      0.241\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/79      5.62G    0.03944    0.02877    0.01167         42        640: 1\n",
      "tensor([1.79169], device='cuda:0', grad_fn=<AddBackward0>) tensor(10763.08008, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.491      0.391      0.381      0.245\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/79      5.62G    0.03987    0.02887    0.01164         56        640: 1\n",
      "tensor([1.75440], device='cuda:0', grad_fn=<AddBackward0>) tensor(9956.24121, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.507      0.445      0.398      0.257\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/79      5.62G    0.03988    0.02881    0.01149         47        640: 1\n",
      "tensor([1.87445], device='cuda:0', grad_fn=<AddBackward0>) tensor(11085.18457, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.563      0.386      0.378      0.242\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/79      5.62G    0.03931    0.02888    0.01132         30        640: 1\n",
      "tensor([1.54430], device='cuda:0', grad_fn=<AddBackward0>) tensor(10144.43652, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.547      0.401      0.386       0.25\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/79      5.62G    0.03916    0.02837    0.01113         31        640: 1\n",
      "tensor([1.51485], device='cuda:0', grad_fn=<AddBackward0>) tensor(10422.17578, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.584      0.367      0.389      0.251\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/79      5.62G    0.03941     0.0288    0.01103         16        640: 1\n",
      "tensor([1.64652], device='cuda:0', grad_fn=<AddBackward0>) tensor(10982.79492, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.566        0.4      0.408      0.274\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/79      5.62G    0.03868    0.02854    0.01076         39        640: 1\n",
      "tensor([1.76303], device='cuda:0', grad_fn=<AddBackward0>) tensor(11508.44824, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.57      0.405      0.388      0.256\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/79      5.62G    0.03823    0.02811    0.01068         45        640: 1\n",
      "tensor([1.66240], device='cuda:0', grad_fn=<AddBackward0>) tensor(10825.28711, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.52      0.376      0.372      0.248\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/79      5.62G    0.03814    0.02848    0.01098         50        640: 1\n",
      "tensor([1.74796], device='cuda:0', grad_fn=<AddBackward0>) tensor(10820.07520, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.554      0.381       0.39      0.258\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/79      5.62G    0.03864    0.02794    0.01053         28        640: 1\n",
      "tensor([1.69633], device='cuda:0', grad_fn=<AddBackward0>) tensor(11248.62891, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.503      0.403      0.379      0.243\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/79      5.62G    0.03741    0.02797    0.01048         36        640: 1\n",
      "tensor([1.63861], device='cuda:0', grad_fn=<AddBackward0>) tensor(10416.07617, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.583      0.404      0.387      0.254\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/79      5.62G    0.03783    0.02774    0.01033         66        640: 1\n",
      "tensor([1.85485], device='cuda:0', grad_fn=<AddBackward0>) tensor(10459.31543, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.567      0.404      0.396      0.258\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      50/79      5.62G     0.0373    0.02745    0.01034         54        640: 1\n",
      "tensor([1.66374], device='cuda:0', grad_fn=<AddBackward0>) tensor(10017.02246, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.506      0.422      0.405       0.26\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      51/79      5.62G    0.03737    0.02748    0.01002         41        640: 1\n",
      "tensor([1.64484], device='cuda:0', grad_fn=<AddBackward0>) tensor(10138.76465, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.557      0.423      0.397      0.257\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      52/79      5.62G    0.03692    0.02705    0.01009         45        640: 1\n",
      "tensor([1.57896], device='cuda:0', grad_fn=<AddBackward0>) tensor(10230.06055, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.458      0.414      0.385      0.253\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      53/79      5.62G    0.03648    0.02712   0.009889         67        640: 1\n",
      "tensor([1.72392], device='cuda:0', grad_fn=<AddBackward0>) tensor(10316.37305, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.57      0.402      0.396      0.261\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      54/79      5.62G    0.03665    0.02727    0.01019         61        640: 1\n",
      "tensor([1.66268], device='cuda:0', grad_fn=<AddBackward0>) tensor(10408.86035, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.516      0.408      0.405      0.263\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      55/79      5.62G    0.03633    0.02637   0.009728         55        640: 1\n",
      "tensor([1.68738], device='cuda:0', grad_fn=<AddBackward0>) tensor(9811.88281, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.532      0.433      0.417       0.27\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      56/79      5.62G    0.03631    0.02652   0.009674         34        640: 1\n",
      "tensor([1.57159], device='cuda:0', grad_fn=<AddBackward0>) tensor(9805.40332, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.54      0.413      0.402      0.271\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      57/79      5.62G    0.03604    0.02666   0.009368         36        640: 1\n",
      "tensor([1.74498], device='cuda:0', grad_fn=<AddBackward0>) tensor(11464.82812, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.615      0.385      0.408      0.279\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      58/79      5.62G    0.03585    0.02676   0.009554         52        640: 1\n",
      "tensor([1.70821], device='cuda:0', grad_fn=<AddBackward0>) tensor(10392.27441, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.587      0.392      0.405      0.265\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      59/79      5.62G    0.03577    0.02608   0.009343         31        640: 1\n",
      "tensor([1.60773], device='cuda:0', grad_fn=<AddBackward0>) tensor(11137.01465, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.58      0.406      0.409      0.277\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      60/79      5.62G    0.03539    0.02616   0.009269         49        640: 1\n",
      "tensor([1.49804], device='cuda:0', grad_fn=<AddBackward0>) tensor(8999.52148, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.587      0.394      0.404      0.272\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      61/79      5.62G    0.03532    0.02584   0.009164         52        640: 1\n",
      "tensor([1.56350], device='cuda:0', grad_fn=<AddBackward0>) tensor(9198.21094, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.618      0.369      0.405      0.268\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      62/79      5.62G    0.03547    0.02609   0.009274         41        640: 1\n",
      "tensor([1.56477], device='cuda:0', grad_fn=<AddBackward0>) tensor(8911.93457, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.602      0.394      0.412      0.277\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      63/79      5.62G    0.03504    0.02551   0.009146         34        640: 1\n",
      "tensor([1.54568], device='cuda:0', grad_fn=<AddBackward0>) tensor(10384.70215, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.587      0.396      0.402      0.269\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      64/79      5.62G    0.03462    0.02534   0.009227         37        640: 1\n",
      "tensor([1.57121], device='cuda:0', grad_fn=<AddBackward0>) tensor(9869.25195, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.608      0.385      0.408       0.28\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      65/79      5.62G    0.03482    0.02547   0.008908         33        640: 1\n",
      "tensor([1.56004], device='cuda:0', grad_fn=<AddBackward0>) tensor(9599.52148, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.517      0.387      0.397      0.268\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      66/79      5.62G    0.03466    0.02537   0.008887         20        640: 1\n",
      "tensor([1.59732], device='cuda:0', grad_fn=<AddBackward0>) tensor(10278.78418, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.642      0.391        0.4      0.269\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      67/79      5.62G    0.03417    0.02562   0.009021         49        640: 1\n",
      "tensor([1.66867], device='cuda:0', grad_fn=<AddBackward0>) tensor(10031.64648, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.622      0.396      0.394      0.264\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      68/79      5.62G    0.03378    0.02508   0.008584         93        640: 1\n",
      "tensor([1.73743], device='cuda:0', grad_fn=<AddBackward0>) tensor(9278.79883, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.541      0.423      0.411      0.275\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      69/79      5.62G    0.03403    0.02526   0.008461         32        640: 1\n",
      "tensor([1.48205], device='cuda:0', grad_fn=<AddBackward0>) tensor(9797.10645, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.561      0.403      0.399      0.267\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      70/79      5.62G    0.03354    0.02507   0.008758         47        640: 1\n",
      "tensor([1.62552], device='cuda:0', grad_fn=<AddBackward0>) tensor(10336.33594, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.597      0.384      0.401      0.272\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      71/79      5.62G     0.0337    0.02539   0.008456         43        640: 1\n",
      "tensor([1.56015], device='cuda:0', grad_fn=<AddBackward0>) tensor(10616.28125, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.592      0.391        0.4      0.268\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      72/79      5.62G    0.03343    0.02459   0.008168         37        640: 1\n",
      "tensor([1.46580], device='cuda:0', grad_fn=<AddBackward0>) tensor(9522.37695, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.594      0.384      0.397      0.268\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      73/79      5.62G    0.03351    0.02486   0.008314         53        640: 1\n",
      "tensor([1.54170], device='cuda:0', grad_fn=<AddBackward0>) tensor(9209.90039, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.616      0.375      0.409       0.28\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      74/79      5.62G    0.03337      0.025   0.008304         59        640: 1\n",
      "tensor([1.67353], device='cuda:0', grad_fn=<AddBackward0>) tensor(9844.80078, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.626      0.377      0.418       0.28\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      75/79      5.62G    0.03284    0.02428   0.008466         26        640: 1\n",
      "tensor([1.39675], device='cuda:0', grad_fn=<AddBackward0>) tensor(10510.00488, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.686       0.37      0.427      0.281\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      76/79      5.62G    0.03321    0.02454   0.008147         54        640: 1\n",
      "tensor([1.58493], device='cuda:0', grad_fn=<AddBackward0>) tensor(9793.95020, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.613      0.394      0.416      0.281\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      77/79      5.62G    0.03264    0.02396   0.008047         31        640: 1\n",
      "tensor([1.59759], device='cuda:0', grad_fn=<AddBackward0>) tensor(10451.27734, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.611      0.389      0.415       0.28\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      78/79      5.62G    0.03281    0.02435   0.007927         56        640: 1\n",
      "tensor([1.68353], device='cuda:0', grad_fn=<AddBackward0>) tensor(10352.34277, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.608      0.381      0.417       0.28\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      79/79      5.62G     0.0326    0.02409   0.008147         45        640: 1\n",
      "tensor([1.37281], device='cuda:0', grad_fn=<AddBackward0>) tensor(9078.60254, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.619      0.378      0.414      0.279\n",
      "\n",
      "80 epochs completed in 0.818 hours.\n",
      "Optimizer stripped from runs/train/k_v_kittimodels_openimages/weights/last.pt, 14.6MB\n",
      "Optimizer stripped from runs/train/k_v_kittimodels_openimages/weights/best.pt, 14.6MB\n",
      "\n",
      "Validating runs/train/k_v_kittimodels_openimages/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.688       0.37      0.427      0.281\n",
      "                   car       1200        287      0.737      0.536      0.583      0.386\n",
      "                   van       1200         29      0.614      0.379      0.509       0.34\n",
      "                 truck       1200         29      0.568      0.379      0.463      0.349\n",
      "                person       1200       2264      0.536      0.291      0.316      0.163\n",
      "               bicycle       1200         54      0.704      0.389      0.464      0.273\n",
      "                  bird       1200        136      0.685      0.512      0.526      0.318\n",
      "                  boat       1200        145      0.726      0.348       0.43      0.231\n",
      "                bottle       1200         31          1          0          0          0\n",
      "                   bus       1200         15      0.844      0.723      0.758      0.637\n",
      "                   cat       1200          1          1          0     0.0383      0.023\n",
      "                 chair       1200         21      0.188      0.143     0.0808     0.0498\n",
      "                   dog       1200         42      0.839      0.452      0.587      0.385\n",
      "                 horse       1200         44      0.883      0.513      0.671      0.427\n",
      "                 sheep       1200         10      0.768        0.4      0.504      0.273\n",
      "             billboard       1200          4          1          0      0.246     0.0993\n",
      "                rabbit       1200         11      0.948      0.455       0.62      0.417\n",
      "                monkey       1200         18      0.719      0.944      0.822      0.591\n",
      "                   pig       1200          6      0.593      0.667       0.68       0.53\n",
      "                   toy       1200         64      0.301     0.0781     0.0887     0.0401\n",
      "         traffic light       1200         18      0.325     0.0556     0.0852     0.0535\n",
      "          traffic sign       1200          4      0.465        0.5      0.497      0.323\n",
      "Results saved to \u001b[1mruns/train/k_v_kittimodels_openimages\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : k_v_kittimodels_openimages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/exp-100epoch/1732dda0c4314ee5b549de224733a205\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_f1                    : 0.5010352342021696\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_false_positives       : 9.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5                : 0.4636120638670941\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5:.95            : 0.27258682276946916\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_precision             : 0.7040732986174648\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_recall                : 0.3888888888888889\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_support               : 54\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_true_positives        : 21.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_f1                  : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_false_positives     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_mAP@.5              : 0.2463240418118467\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_mAP@.5:.95          : 0.09929022603412847\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_precision           : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_recall              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_support             : 4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_true_positives      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_f1                       : 0.5862549344884677\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_false_positives          : 32.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_mAP@.5                   : 0.5260419810460827\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_mAP@.5:.95               : 0.3176587519995152\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_precision                : 0.6852402392685834\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_recall                   : 0.51225757007007\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_support                  : 136\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_true_positives           : 70.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_f1                       : 0.4703823437236534\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_false_positives          : 19.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_mAP@.5                   : 0.43036494120102675\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_mAP@.5:.95               : 0.23093291938349098\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_precision                : 0.7263413090352562\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_recall                   : 0.3478143457859482\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_support                  : 145\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_true_positives           : 50.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_f1                     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_false_positives        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_mAP@.5                 : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_mAP@.5:.95             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_precision              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_recall                 : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_support                : 31\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_true_positives         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_f1                        : 0.7791466334219407\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_false_positives           : 2.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5                    : 0.7584653188850948\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5:.95                : 0.637452126645121\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_precision                 : 0.8442385120162897\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_recall                    : 0.7233735958180402\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_support                   : 15\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_true_positives            : 11.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_f1                        : 0.6207606020778088\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_false_positives           : 55.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5                    : 0.5834847227683676\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5:.95                : 0.3864102088670821\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_precision                 : 0.7367479275374011\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_recall                    : 0.5363259997406339\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_support                   : 287\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_true_positives            : 154.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_f1                        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_false_positives           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_mAP@.5                    : 0.038269230769230785\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_mAP@.5:.95                : 0.02296153846153847\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_precision                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_recall                    : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_support                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_true_positives            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_f1                      : 0.1622477475839304\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_false_positives         : 13.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_mAP@.5                  : 0.08082943653129929\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_mAP@.5:.95              : 0.04984835457138704\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_precision               : 0.18772899933725967\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_recall                  : 0.14285714285714285\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_support                 : 21\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_true_positives          : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_f1                        : 0.5879289014057013\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_false_positives           : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_mAP@.5                    : 0.5869309787598471\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_mAP@.5:.95                : 0.38479186037444735\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_precision                 : 0.8394574855927514\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_recall                    : 0.4523809523809524\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_support                   : 42\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_true_positives            : 19.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_f1                      : 0.6491678755774791\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_false_positives         : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_mAP@.5                  : 0.6712066928024785\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_mAP@.5:.95              : 0.42710916618379685\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_precision               : 0.8827108285172801\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_recall                  : 0.513348776789637\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_support                 : 44\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_true_positives          : 23.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [2104]                   : (1.5715925693511963, 7.002405643463135)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [160]         : (0.04617262871500649, 0.42698644934379115)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [160]    : (0.023142386932740414, 0.2814897211425035)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [160]       : (0.45002506181443486, 0.8453386985513147)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [160]          : (0.027786960626734955, 0.4453806874016964)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_f1                     : 0.8166099498828344\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_false_positives        : 7.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_mAP@.5                 : 0.8216711504860538\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_mAP@.5:.95             : 0.5913570205692765\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_precision              : 0.7192556921427556\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_recall                 : 0.9444444444444444\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_support                : 18\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_true_positives         : 17.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_f1                     : 0.3768958270830225\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_false_positives        : 570.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5                 : 0.31628422561811564\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5:.95             : 0.1631392640918052\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_precision              : 0.5359698469976445\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_recall                 : 0.29063604240282687\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_support                : 2264\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_true_positives         : 658.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_f1                        : 0.6275262557468693\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_false_positives           : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_mAP@.5                    : 0.6797921348314606\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_mAP@.5:.95                : 0.5299527191011235\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_precision                 : 0.5927268944300574\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_recall                    : 0.6666666666666666\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_support                   : 6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_true_positives            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_f1                     : 0.614505527103695\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_false_positives        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_mAP@.5                 : 0.6198757663847781\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_mAP@.5:.95             : 0.417142295148468\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_precision              : 0.9481824666716373\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_recall                 : 0.45454545454545453\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_support                : 11\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_true_positives         : 5.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_f1                      : 0.5259831138308385\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_false_positives         : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_mAP@.5                  : 0.5042337164750959\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_mAP@.5:.95              : 0.27265236701995327\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_precision               : 0.7678112413935374\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_recall                  : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_support                 : 10\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_true_positives          : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_f1                        : 0.1240103880159588\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_false_positives           : 12.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_mAP@.5                    : 0.08868397748631947\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_mAP@.5:.95                : 0.04008682094216955\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_precision                 : 0.3005095585065529\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_recall                    : 0.078125\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_support                   : 64\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_true_positives            : 5.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_f1              : 0.0949052427031068\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_false_positives : 2.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_mAP@.5          : 0.0852355795148248\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_mAP@.5:.95      : 0.05349087315680989\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_precision       : 0.32534593955494406\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_recall          : 0.05555555555555555\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_support         : 18\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_true_positives  : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_f1               : 0.4819550153823405\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_false_positives  : 2.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_mAP@.5           : 0.49725663716814156\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_mAP@.5:.95       : 0.32319026548672564\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_precision        : 0.4651671473453652\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_recall           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_support          : 4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_true_positives   : 2.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [160]          : (0.03259593993425369, 0.08482443541288376)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [160]          : (0.007927466183900833, 0.06391534209251404)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [160]          : (0.023962654173374176, 0.04267418384552002)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_f1                      : 0.4547942282743978\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_false_positives         : 8.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5                  : 0.46315959273833157\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5:.95              : 0.3487816897988617\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_precision               : 0.5677851585683963\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_recall                  : 0.3793103448275862\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_support                 : 29\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_true_positives          : 11.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [160]            : (0.05212518945336342, 0.0646343007683754)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [160]            : (0.019415035843849182, 0.04070507362484932)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [160]            : (0.02375040575861931, 0.02981921285390854)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_f1                        : 0.46894569341291614\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_false_positives           : 7.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5                    : 0.5090946784379785\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5:.95                : 0.3399946179417781\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_precision                 : 0.6140535254382344\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_recall                    : 0.3793103448275862\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_support                   : 29\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_true_positives            : 11.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [160]                   : (0.00034750000000000026, 0.07011406844106464)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [160]                   : (0.00034750000000000026, 0.009740139416983522)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [160]                   : (0.00034750000000000026, 0.009740139416983522)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : k_v_kittimodels_openimages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/exp-100epoch/1732dda0c4314ee5b549de224733a205\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.225\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/k_v_kittimodels_openimages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (2.40 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     model graph                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Please wait for assets to finish uploading (timeout is 10800 seconds)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m All assets have been sent, waiting for delivery confirmation\n"
     ]
    }
   ],
   "source": [
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_LwfPro.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_openimages.yaml \\\n",
    "--data data/openimages.yaml \\\n",
    "--epochs 80 \\\n",
    "--weights ./runs/train/increment_VOC_plain/weights/last.pt \\\n",
    "--Lwf_enable \\\n",
    "--Lwf_temperature 1.0 \\\n",
    "--Lwf_lambda 1e-4 \\\n",
    "--Old_models \\\n",
    "        ./runs/train/fog_02/weights/last.pt \\\n",
    "--name k_v_kittimodels_openimages \\\n",
    "\n",
    "\"\"\"\n",
    "!{command}\n",
    "#43分钟"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "906fdf50-9347-4cf0-adf5-02ba060aad5d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/openimages.yaml, weights=['runs/train/k_v_kittimodels_openimages/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/test.cache... 600 ima\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        600       1621      0.508      0.328       0.33       0.22\n",
      "                   car        600        113      0.493      0.442      0.401      0.273\n",
      "                   van        600          6       0.11      0.167     0.0265     0.0258\n",
      "                 truck        600         17      0.793      0.529      0.754      0.594\n",
      "                person        600       1131       0.45      0.333      0.311      0.161\n",
      "               bicycle        600         43      0.597      0.419      0.462      0.269\n",
      "                  bird        600         61      0.608      0.525       0.51      0.353\n",
      "                  boat        600         82      0.665      0.451      0.459      0.263\n",
      "                bottle        600          1          1          0          0          0\n",
      "                   bus        600          3      0.325      0.333       0.17      0.153\n",
      "                   cat        600          5          1          0      0.211      0.129\n",
      "                 chair        600         12      0.382       0.25       0.26      0.131\n",
      "                   dog        600         25      0.621       0.64      0.583      0.399\n",
      "                 horse        600         37      0.777      0.565      0.638      0.349\n",
      "                 sheep        600          8      0.689      0.625      0.639      0.485\n",
      "                 train        600          2          1          0          0          0\n",
      "             billboard        600          3          0          0     0.0108    0.00539\n",
      "                rabbit        600          1          0          0     0.0663     0.0597\n",
      "                monkey        600         16      0.582       0.75       0.59      0.391\n",
      "                   pig        600          7      0.536          1      0.797      0.607\n",
      "                   toy        600         42      0.556      0.179      0.188     0.0786\n",
      "         traffic light        600          5          0          0      0.123     0.0951\n",
      "          traffic sign        600          1          0          0     0.0585     0.0117\n",
      "Speed: 0.1ms pre-process, 3.5ms inference, 1.4ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp233\u001b[0m\n",
      "openimages\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_VOC.yaml, weights=['runs/train/k_v_kittimodels_openimages/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 image\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.722      0.237      0.282      0.171\n",
      "                   car       4952       1201      0.734      0.712      0.735      0.511\n",
      "                person       4952       4528      0.469       0.49       0.44      0.247\n",
      "             aeroplane       4952        285          1          0          0          0\n",
      "               bicycle       4952        337      0.565      0.564      0.541      0.321\n",
      "                  bird       4952        459      0.632      0.455      0.489      0.269\n",
      "                  boat       4952        263      0.357      0.502        0.4      0.193\n",
      "                bottle       4952        469       0.64     0.0405     0.0913     0.0493\n",
      "                   bus       4952        213      0.705      0.451      0.567      0.416\n",
      "                   cat       4952        358          1          0      0.431      0.265\n",
      "                 chair       4952        756      0.464      0.141      0.173       0.09\n",
      "                   cow       4952        244          1          0      0.167      0.134\n",
      "           diningtable       4952        206          1          0          0          0\n",
      "                   dog       4952        489      0.648      0.357      0.463      0.256\n",
      "                 horse       4952        348      0.772      0.575      0.666      0.386\n",
      "             motorbike       4952        325          1          0          0          0\n",
      "           pottedplant       4952        480          0          0          0          0\n",
      "                 sheep       4952        242      0.456      0.443      0.393      0.228\n",
      "                  sofa       4952        239          1          0          0          0\n",
      "                 train       4952        282          1          0     0.0775     0.0473\n",
      "             tvmonitor       4952        308          1          0          0          0\n",
      "Speed: 0.1ms pre-process, 1.5ms inference, 0.8ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp234\u001b[0m\n",
      "Voc\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_kitti.yaml, weights=['runs/train/k_v_kittimodels_openimages/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.791      0.199      0.274      0.132\n",
      "                   car       2244       8711      0.754       0.59      0.664      0.327\n",
      "                   van       2244        861      0.736      0.123      0.296       0.17\n",
      "                 truck       2244        333      0.435      0.498       0.44      0.235\n",
      "                  tram       2244        138          1          0      0.233      0.119\n",
      "                person       2244       1286      0.403      0.379       0.28      0.114\n",
      "        person_sitting       2244         89          1          0      0.101     0.0364\n",
      "               cyclist       2244        496          1          0      0.105     0.0226\n",
      "                  misc       2244        284          1          0     0.0711     0.0313\n",
      "Speed: 0.0ms pre-process, 1.1ms inference, 0.8ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp235\u001b[0m\n",
      "kitti\n"
     ]
    }
   ],
   "source": [
    "# openimages\n",
    "model = f'runs/train/k_v_kittimodels_openimages/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/openimages.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'openimages' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "\n",
    "# Voc\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/val_VOC.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Voc' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "# kitti\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/val_kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'kitti' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4cc6855c-343d-4404-9020-0f927ed6d617",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "01f54df6-5de7-43d0-90ef-d7d903bfb13d",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d75e7a70-f966-4720-988a-167133f0fd3d",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "13bec009-2b23-4b87-a83d-1ea1e76a90bd",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b949d214-2ffe-4cef-9a58-8eca3565ff84",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "abc1f1a2-743e-45ff-afc8-bd555927fde8",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/openimages.yaml, weights=['runs/train/k_v_2oldmodels_openimages2/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/test.cache... 600 ima\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        600       1621      0.558      0.347      0.348      0.228\n",
      "                   car        600        113      0.443      0.496      0.361      0.238\n",
      "                   van        600          6          0          0     0.0141    0.00741\n",
      "                 truck        600         17      0.634      0.235      0.407      0.331\n",
      "                person        600       1131      0.445      0.333      0.327      0.159\n",
      "               bicycle        600         43      0.674      0.512      0.499      0.311\n",
      "                  bird        600         61      0.413      0.574       0.51      0.343\n",
      "                  boat        600         82      0.534      0.447      0.481      0.258\n",
      "                bottle        600          1          1          0     0.0302    0.00634\n",
      "                   bus        600          3       0.17      0.489      0.515      0.446\n",
      "                   cat        600          5      0.383        0.2       0.32      0.177\n",
      "                 chair        600         12      0.315      0.417      0.221       0.12\n",
      "                   dog        600         25      0.604        0.8      0.655      0.509\n",
      "                 horse        600         37      0.581      0.784      0.734      0.466\n",
      "                 sheep        600          8      0.307      0.625       0.61      0.427\n",
      "                 train        600          2          1          0    0.00587    0.00352\n",
      "             billboard        600          3          1          0     0.0675      0.054\n",
      "                rabbit        600          1          0          0     0.0585     0.0527\n",
      "                monkey        600         16      0.685       0.75      0.706      0.472\n",
      "                   pig        600          7      0.421      0.857       0.62      0.456\n",
      "                   toy        600         42      0.674      0.119      0.192     0.0858\n",
      "         traffic light        600          5          1          0          0          0\n",
      "          traffic sign        600          1          1          0      0.332      0.102\n",
      "Speed: 0.1ms pre-process, 3.2ms inference, 4.1ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp245\u001b[0m\n",
      "openimages\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_VOC.yaml, weights=['runs/train/k_v_2oldmodels_openimages2/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 image\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.561      0.555      0.586      0.355\n",
      "                   car       4952       1201       0.61      0.896      0.851      0.587\n",
      "                person       4952       4528      0.527      0.667      0.611      0.346\n",
      "             aeroplane       4952        285       0.45      0.287      0.331      0.164\n",
      "               bicycle       4952        337      0.566      0.825      0.831      0.531\n",
      "                  bird       4952        459      0.557      0.769      0.727      0.416\n",
      "                  boat       4952        263      0.329      0.719      0.601      0.321\n",
      "                bottle       4952        469       0.66      0.409      0.491      0.288\n",
      "                   bus       4952        213      0.721      0.789       0.81      0.589\n",
      "                   cat       4952        358        0.7      0.735      0.738      0.458\n",
      "                 chair       4952        756      0.515      0.488      0.503      0.286\n",
      "                   cow       4952        244      0.664      0.463      0.557       0.36\n",
      "           diningtable       4952        206      0.444      0.132      0.269     0.0968\n",
      "                   dog       4952        489      0.632      0.716      0.744      0.457\n",
      "                 horse       4952        348      0.663      0.813      0.834      0.541\n",
      "             motorbike       4952        325      0.686      0.344       0.57      0.293\n",
      "           pottedplant       4952        480      0.219     0.0396     0.0986     0.0467\n",
      "                 sheep       4952        242      0.449      0.707      0.562      0.354\n",
      "                  sofa       4952        239      0.526      0.519      0.527      0.328\n",
      "                 train       4952        282      0.816      0.571      0.739      0.458\n",
      "             tvmonitor       4952        308       0.48      0.204      0.335      0.185\n",
      "Speed: 0.1ms pre-process, 1.9ms inference, 2.2ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp246\u001b[0m\n",
      "Voc\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_kitti.yaml, weights=['runs/train/k_v_2oldmodels_openimages2/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.492       0.22      0.223      0.107\n",
      "                   car       2244       8711      0.488      0.744      0.708       0.35\n",
      "                   van       2244        861      0.583      0.174      0.254      0.141\n",
      "                 truck       2244        333      0.244      0.225      0.146     0.0898\n",
      "                  tram       2244        138          1          0     0.0869     0.0433\n",
      "                person       2244       1286      0.285       0.57      0.395      0.174\n",
      "        person_sitting       2244         89      0.725     0.0303     0.0942     0.0321\n",
      "               cyclist       2244        496      0.538     0.0181     0.0817     0.0188\n",
      "                  misc       2244        284     0.0719    0.00101     0.0206    0.00711\n",
      "Speed: 0.0ms pre-process, 1.2ms inference, 1.5ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp247\u001b[0m\n",
      "kitti\n"
     ]
    }
   ],
   "source": [
    "# both 1e-3\n",
    "model = f'runs/train/k_v_2oldmodels_openimages2/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/openimages.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'openimages' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "\n",
    "# Voc\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/val_VOC.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Voc' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "# kitti\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/val_kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'kitti' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "7d6b02c9-e788-463e-b6b0-7ee8950ea15e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/openimages.yaml, weights=['runs/train/k_v_2oldmodels_openimages4/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/test.cache... 600 ima\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        600       1621      0.483      0.289      0.287      0.134\n",
      "                   car        600        113      0.326      0.442      0.234      0.143\n",
      "                   van        600          6          0          0    0.00152   0.000798\n",
      "                 truck        600         17      0.293      0.529      0.382       0.28\n",
      "                person        600       1131      0.355       0.32      0.258      0.114\n",
      "               bicycle        600         43      0.478      0.419      0.434      0.205\n",
      "                  bird        600         61      0.304      0.492      0.365       0.17\n",
      "                  boat        600         82      0.392      0.402      0.318      0.113\n",
      "                bottle        600          1          1          0     0.0155    0.00311\n",
      "                   bus        600          3      0.229      0.333      0.343       0.24\n",
      "                   cat        600          5          1          0       0.14     0.0538\n",
      "                 chair        600         12      0.178       0.25        0.2      0.071\n",
      "                   dog        600         25      0.587      0.513      0.493       0.24\n",
      "                 horse        600         37      0.455      0.649      0.537      0.222\n",
      "                 sheep        600          8      0.223        0.5      0.508      0.292\n",
      "                 train        600          2          1          0          0          0\n",
      "             billboard        600          3          1          0      0.128     0.0723\n",
      "                rabbit        600          1          0          0     0.0262     0.0157\n",
      "                monkey        600         16      0.418      0.673      0.397      0.213\n",
      "                   pig        600          7      0.244      0.714      0.498      0.264\n",
      "                   toy        600         42      0.145      0.119     0.0505     0.0208\n",
      "         traffic light        600          5          1          0          0          0\n",
      "          traffic sign        600          1          1          0      0.995      0.209\n",
      "Speed: 0.1ms pre-process, 3.3ms inference, 1.7ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp248\u001b[0m\n",
      "openimages\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_VOC.yaml, weights=['runs/train/k_v_2oldmodels_openimages4/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 image\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.629      0.223      0.296      0.146\n",
      "                   car       4952       1201      0.605      0.741      0.704      0.447\n",
      "                person       4952       4528       0.42       0.45      0.393      0.187\n",
      "             aeroplane       4952        285          1    0.00675      0.187     0.0756\n",
      "               bicycle       4952        337      0.472      0.516      0.489      0.252\n",
      "                  bird       4952        459      0.424      0.431       0.35      0.149\n",
      "                  boat       4952        263      0.212      0.433      0.234     0.0844\n",
      "                bottle       4952        469      0.216    0.00426     0.0398     0.0152\n",
      "                   bus       4952        213        0.7      0.415      0.505      0.307\n",
      "                   cat       4952        358          1          0      0.244        0.1\n",
      "                 chair       4952        756      0.351      0.202      0.173     0.0705\n",
      "                   cow       4952        244          1          0       0.35        0.2\n",
      "           diningtable       4952        206          1          0      0.107     0.0356\n",
      "                   dog       4952        489       0.53      0.284       0.36      0.152\n",
      "                 horse       4952        348      0.569       0.52       0.51      0.221\n",
      "             motorbike       4952        325          1          0        0.4      0.177\n",
      "           pottedplant       4952        480      0.205    0.00625     0.0134     0.0043\n",
      "                 sheep       4952        242      0.408       0.45      0.426      0.245\n",
      "                  sofa       4952        239          1          0      0.139     0.0637\n",
      "                 train       4952        282          1          0      0.271      0.113\n",
      "             tvmonitor       4952        308      0.468    0.00325     0.0332     0.0143\n",
      "Speed: 0.1ms pre-process, 1.5ms inference, 0.8ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp249\u001b[0m\n",
      "Voc\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_kitti.yaml, weights=['runs/train/k_v_2oldmodels_openimages4/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.466      0.394      0.391      0.189\n",
      "                   car       2244       8711      0.557      0.823      0.805      0.431\n",
      "                   van       2244        861      0.707      0.471       0.58       0.32\n",
      "                 truck       2244        333      0.404      0.576       0.47      0.279\n",
      "                  tram       2244        138      0.571      0.159      0.284      0.119\n",
      "                person       2244       1286      0.209      0.593      0.407      0.177\n",
      "        person_sitting       2244         89      0.184       0.18     0.0902     0.0263\n",
      "               cyclist       2244        496       0.45       0.24      0.244     0.0609\n",
      "                  misc       2244        284      0.648      0.113      0.246     0.0959\n",
      "Speed: 0.0ms pre-process, 1.0ms inference, 1.0ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp250\u001b[0m\n",
      "kitti\n"
     ]
    }
   ],
   "source": [
    "# 1e-4 1e-3\n",
    "model = f'runs/train/k_v_2oldmodels_openimages4/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/openimages.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'openimages' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "\n",
    "# Voc\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/val_VOC.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Voc' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "# kitti\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/val_kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'kitti' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3ae2abbb-1d89-4b72-8143-9f123f191abf",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "508beb97-9aae-4c8a-bea5-6450fc419fb9",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6f0b28dd-3a57-4c24-9f2e-ced89db7e451",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b7f96818-73b5-47bd-b356-c50c6e20299b",
   "metadata": {},
   "outputs": [],
   "source": [
    "openimages_k_v"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "99851eca-e69c-4963-a52b-92ccd3ff483b",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_DER: \u001b[0mweights=./runs/train/increment_VOC_plain/weights/last.pt, cfg=models/yolov5s_openimages.yaml, data=data/openimages_k_v.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=50, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=k_v_o_replay_baseline, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=False, Lwf_lambda=[], Lwf_temperature=1.0, Old_models=[], DER_enable=False, DER_old_model=[]\n",
      "Command 'git fetch ultralytics' timed out after 5 seconds\n",
      "YOLOv5 🚀 7330b3e4 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/exp-100epoch/5fb83cf34fb947429b674b7a1cd8bde9\u001b[0m\n",
      "\n",
      "extractors长度： 0\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1    110583  models.yolo_DerTest.Detect              [36, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7116727 parameters, 7116727 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/355 items from runs/train/increment_VOC_plain/weights/last.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 66 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/kitti_old... 7701 images, 0\u001b[0m\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /root/autodl-tmp/datasets/VOC/labels/kitti_old.cache\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/val.cache... 1200 imag\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.23 anchors/target, 0.999 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/k_v_o_replay_baseline/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/k_v_o_replay_baseline\u001b[0m\n",
      "Starting training for 50 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/49      3.65G     0.1061    0.04338    0.08751         86        640:  error: RPC failed; curl 16 Error in the HTTP2 framing layer\n",
      "fatal: expected flush after ref listing\n",
      "       0/49      3.65G    0.07564    0.04558    0.06198         29        640: 1\n",
      "tensor([0.66080], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.766     0.0904        0.1     0.0545\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/49      5.75G    0.05442    0.03644    0.03384         17        640: 1\n",
      "tensor([0.57694], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.589      0.227      0.192      0.111\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/49      5.75G    0.05198    0.03536    0.02582         50        640: 1\n",
      "tensor([0.60062], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.58      0.222      0.265      0.155\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/49      5.75G      0.049    0.03593    0.02106         23        640: 1\n",
      "tensor([0.54128], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.473      0.347      0.291      0.187\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/49      5.75G    0.04701    0.03556    0.01917         22        640: 1\n",
      "tensor([0.47678], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.486      0.306       0.29      0.185\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/49      5.75G    0.04596    0.03503    0.01761         38        640: 1\n",
      "tensor([0.55994], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.502      0.355        0.3      0.184\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/49      5.75G    0.04545    0.03552    0.01627         22        640: 1\n",
      "tensor([0.49637], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.47      0.367      0.315      0.202\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/49      5.75G    0.04434    0.03488    0.01581         14        640: 1\n",
      "tensor([0.49810], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.545      0.335      0.318      0.203\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/49      5.75G    0.04372    0.03427    0.01495         30        640: 1\n",
      "tensor([0.50948], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.528      0.346      0.323      0.212\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/49      5.75G    0.04334    0.03435    0.01445         20        640: 1\n",
      "tensor([0.34743], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.559      0.363      0.337      0.215\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/49      5.75G     0.0429    0.03409    0.01387         18        640: 1\n",
      "tensor([0.31150], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.53      0.347      0.344      0.226\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/49      5.75G    0.04266    0.03413     0.0136         27        640: 1\n",
      "tensor([0.54007], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.508      0.364      0.334      0.221\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/49      5.75G      0.042    0.03401     0.0128         36        640: 1\n",
      "tensor([0.49153], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.564       0.34      0.337      0.217\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/49      5.75G    0.04176    0.03342    0.01264         22        640: 1\n",
      "tensor([0.39937], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.595      0.332      0.331       0.22\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/49      5.75G    0.04155    0.03404    0.01238         13        640: 1\n",
      "tensor([0.37205], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.535      0.363      0.357      0.232\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/49      5.75G    0.04105    0.03326    0.01216         30        640: 1\n",
      "tensor([0.38349], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.568      0.356      0.347      0.226\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/49      5.75G    0.04096    0.03303    0.01163         31        640: 1\n",
      "tensor([0.35461], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.571      0.355       0.36      0.238\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/49      5.75G     0.0406     0.0332    0.01097         38        640: 1\n",
      "tensor([0.41321], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.427      0.404      0.357      0.233\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/49      5.75G    0.03999    0.03229    0.01091         47        640: 1\n",
      "tensor([0.50437], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.434      0.388      0.363      0.239\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/49      5.75G    0.03978    0.03174    0.01132         30        640: 1\n",
      "tensor([0.39471], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.468      0.405      0.367      0.253\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/49      5.75G    0.03967    0.03204    0.01061         22        640: 1\n",
      "tensor([0.39544], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.552       0.39      0.366      0.238\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/49      5.75G    0.03918    0.03193    0.01034         34        640: 1\n",
      "tensor([0.44935], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.588      0.351      0.357      0.242\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/49      5.75G    0.03897     0.0316    0.01016         21        640: 1\n",
      "tensor([0.30606], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.546      0.391      0.381      0.253\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/49      5.75G    0.03899    0.03175   0.009879         31        640: 1\n",
      "tensor([0.52992], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.446      0.425      0.361      0.243\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/49      5.75G    0.03851    0.03187   0.009864         28        640: 1\n",
      "tensor([0.44172], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.487      0.416      0.387      0.262\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/49      5.75G    0.03838    0.03164   0.009568         40        640: 1\n",
      "tensor([0.56243], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.571      0.376      0.396      0.265\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/49      5.75G      0.038    0.03119   0.009318         37        640: 1\n",
      "tensor([0.41983], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.543      0.397      0.389      0.259\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/49      5.75G    0.03779    0.03139   0.008923         31        640: 1\n",
      "tensor([0.36706], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.558      0.394      0.392      0.271\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/49      5.75G    0.03736    0.03084   0.008796         25        640: 1\n",
      "tensor([0.39002], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.479      0.412      0.409      0.288\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/49      5.75G    0.03716    0.03079   0.008791         22        640: 1\n",
      "tensor([0.28068], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.461      0.416      0.409      0.285\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/49      5.75G    0.03719    0.03053   0.008603         21        640: 1\n",
      "tensor([0.43485], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.563      0.392      0.406      0.278\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/49      5.75G    0.03661    0.03002   0.008342         32        640: 1\n",
      "tensor([0.39997], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.517      0.386       0.41      0.284\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/49      5.75G    0.03638     0.0303   0.008229         26        640: 1\n",
      "tensor([0.29329], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.541      0.432      0.412      0.284\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/49      5.75G    0.03619    0.02965   0.008114         22        640: 1\n",
      "tensor([0.39373], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.474      0.475      0.431      0.286\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/49      5.75G    0.03593    0.02966   0.007867         25        640: 1\n",
      "tensor([0.32191], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.567      0.427      0.418      0.281\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/49      5.75G    0.03556    0.02949   0.007738         21        640: 1\n",
      "tensor([0.28328], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.588      0.405      0.422      0.294\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/49      5.75G    0.03522    0.02946    0.00742         42        640: 1\n",
      "tensor([0.32526], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.499       0.46      0.433      0.301\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/49      5.75G     0.0351    0.02902   0.007171         22        640: 1\n",
      "tensor([0.31339], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.501      0.444      0.414      0.287\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/49      5.75G    0.03486    0.02898   0.007083         27        640: 1\n",
      "tensor([0.38619], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.58      0.408      0.446      0.308\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/49      5.75G    0.03422    0.02865   0.007058         41        640: 1\n",
      "tensor([0.33828], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.565      0.411       0.42       0.29\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/49      5.75G    0.03426    0.02906    0.00677         28        640: 1\n",
      "tensor([0.36091], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.514      0.436      0.436      0.303\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/49      5.75G    0.03398    0.02814   0.006624         29        640: 1\n",
      "tensor([0.28275], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.495      0.445      0.444      0.311\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/49      5.75G     0.0335    0.02808   0.006702         21        640: 1\n",
      "tensor([0.30971], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.587      0.412      0.438      0.306\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/49      5.75G    0.03349    0.02782   0.006641         29        640: 1\n",
      "tensor([0.32031], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.52      0.447      0.439      0.307\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/49      5.75G    0.03334    0.02815   0.006466         39        640: 1\n",
      "tensor([0.51784], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.481      0.447       0.43      0.304\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/49      5.75G     0.0329    0.02751   0.006219         31        640: 1\n",
      "tensor([0.39251], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.526      0.443      0.449      0.317\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/49      5.75G    0.03284     0.0276   0.006229         45        640: 1\n",
      "tensor([0.48795], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.517      0.415      0.442      0.311\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/49      5.75G    0.03276    0.02755   0.006042         19        640: 1\n",
      "tensor([0.23556], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.588      0.412      0.447      0.316\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/49      5.75G    0.03234     0.0267   0.005903         27        640: 1\n",
      "tensor([0.31335], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.606      0.408      0.447      0.308\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/49      5.75G    0.03234    0.02684   0.005717         40        640: 1\n",
      "tensor([0.41465], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.566       0.41       0.44       0.31\n",
      "\n",
      "50 epochs completed in 0.721 hours.\n",
      "Optimizer stripped from runs/train/k_v_o_replay_baseline/weights/last.pt, 14.6MB\n",
      "Optimizer stripped from runs/train/k_v_o_replay_baseline/weights/best.pt, 14.6MB\n",
      "\n",
      "Validating runs/train/k_v_o_replay_baseline/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.526      0.443      0.448      0.316\n",
      "                   car       1200        287      0.679      0.628      0.627      0.433\n",
      "                   van       1200         29      0.596       0.69      0.685      0.564\n",
      "                 truck       1200         29      0.417      0.483      0.424      0.339\n",
      "                person       1200       2264      0.419      0.418      0.319      0.173\n",
      "               bicycle       1200         54      0.572        0.5      0.547      0.342\n",
      "                  bird       1200        136      0.562      0.713      0.641      0.406\n",
      "                  boat       1200        145      0.665      0.538      0.579      0.313\n",
      "                bottle       1200         31          0          0    0.00148   0.000738\n",
      "                   bus       1200         15      0.652      0.667      0.743      0.612\n",
      "                   cat       1200          1          0          0     0.0199    0.00995\n",
      "                 chair       1200         21      0.145      0.381      0.204      0.121\n",
      "                   dog       1200         42      0.659      0.595      0.626      0.448\n",
      "                 horse       1200         44      0.721      0.705      0.697      0.484\n",
      "                 sheep       1200         10      0.332        0.4      0.362      0.255\n",
      "             billboard       1200          4      0.392       0.25      0.247     0.0755\n",
      "                rabbit       1200         11          1      0.504      0.695      0.576\n",
      "                monkey       1200         18      0.789      0.944      0.879      0.649\n",
      "                   pig       1200          6      0.743        0.5      0.604      0.511\n",
      "                   toy       1200         64      0.251      0.156       0.15     0.0803\n",
      "         traffic light       1200         18          1          0      0.198      0.115\n",
      "          traffic sign       1200          4      0.458      0.229      0.167      0.138\n",
      "Results saved to \u001b[1mruns/train/k_v_o_replay_baseline\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : k_v_o_replay_baseline\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/exp-100epoch/5fb83cf34fb947429b674b7a1cd8bde9\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_f1                    : 0.533657986594613\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_false_positives       : 20.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5                : 0.5466001274200486\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5:.95            : 0.34158583250261143\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_precision             : 0.572174467723444\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_recall                : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_support               : 54\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_true_positives        : 27.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_f1                  : 0.3053372700876304\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_false_positives     : 2.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_mAP@.5              : 0.24714689265536724\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_mAP@.5:.95          : 0.07548153181963543\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_precision           : 0.3921362736270609\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_recall              : 0.25\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_support             : 4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_true_positives      : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_f1                       : 0.6283455080148149\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_false_positives          : 76.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_mAP@.5                   : 0.6414564544314673\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_mAP@.5:.95               : 0.40592529876097566\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_precision                : 0.5615137330262153\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_recall                   : 0.7132352941176471\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_support                  : 136\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_true_positives           : 97.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_f1                       : 0.5945948918546714\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_false_positives          : 39.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_mAP@.5                   : 0.5794698417483795\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_mAP@.5:.95               : 0.31263038620671024\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_precision                : 0.6646018271881423\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_recall                   : 0.5379310344827586\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_support                  : 145\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_true_positives           : 78.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_f1                     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_false_positives        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_mAP@.5                 : 0.0014818295867926059\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_mAP@.5:.95             : 0.0007382772215619894\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_precision              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_recall                 : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_support                : 31\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_true_positives         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_f1                        : 0.6590087190379816\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_false_positives           : 5.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5                    : 0.743045557263514\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5:.95                : 0.6123527633091819\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_precision                 : 0.651524705922854\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_recall                    : 0.6666666666666666\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_support                   : 15\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_true_positives            : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_f1                        : 0.6525953521634325\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_false_positives           : 85.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5                    : 0.6267238628660534\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5:.95                : 0.4326747874033002\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_precision                 : 0.6794536247235278\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_recall                    : 0.62777970923392\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_support                   : 287\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_true_positives            : 180.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_f1                        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_false_positives           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_mAP@.5                    : 0.01989999999999999\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_mAP@.5:.95                : 0.009949999999999995\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_precision                 : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_recall                    : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_support                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_true_positives            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_f1                      : 0.2098675836023718\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_false_positives         : 47.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_mAP@.5                  : 0.2043732521343017\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_mAP@.5:.95              : 0.1206524052758134\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_precision               : 0.14482639720734958\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_recall                  : 0.38095238095238093\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_support                 : 21\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_true_positives          : 8.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_f1                        : 0.6254747151933538\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_false_positives           : 13.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_mAP@.5                    : 0.6259262354987016\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_mAP@.5:.95                : 0.4478544471176205\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_precision                 : 0.6589476211631262\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_recall                    : 0.5952380952380952\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_support                   : 42\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_true_positives            : 25.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_f1                      : 0.7127491601639064\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_false_positives         : 12.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_mAP@.5                  : 0.6972140355221353\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_mAP@.5:.95              : 0.4841755817443203\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_precision               : 0.7211461639322335\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_recall                  : 0.7045454545454546\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_support                 : 44\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_true_positives          : 31.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [2405]                   : (0.70112144947052, 3.9240598678588867)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [100]         : (0.10047745366952186, 0.4486326651247519)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [100]    : (0.054471744379886874, 0.3165865809634192)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [100]       : (0.42674299367395074, 0.7664376700226357)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [100]          : (0.09037068120347254, 0.4745664380018838)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_f1                     : 0.8598409507571897\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_false_positives        : 5.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_mAP@.5                 : 0.8790085034013605\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_mAP@.5:.95             : 0.6490913474699903\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_precision              : 0.7891488617360475\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_recall                 : 0.9444444444444444\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_support                : 18\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_true_positives         : 17.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_f1                     : 0.41849704222997514\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_false_positives        : 1315.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5                 : 0.31882585358760374\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5:.95             : 0.1725126133356991\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_precision              : 0.4187080780025278\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_recall                 : 0.41828621908127206\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_support                : 2264\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_true_positives         : 947.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_f1                        : 0.5978254215892753\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_false_positives           : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_mAP@.5                    : 0.6042820512820514\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_mAP@.5:.95                : 0.5108541666666667\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_precision                 : 0.7432411863918713\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_recall                    : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_support                   : 6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_true_positives            : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_f1                     : 0.6703860584524184\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_false_positives        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_mAP@.5                 : 0.6952822559793149\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_mAP@.5:.95             : 0.5759826966171084\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_precision              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_recall                 : 0.5041960207427834\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_support                : 11\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_true_positives         : 6.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_f1                      : 0.36302535316349777\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_false_positives         : 8.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_mAP@.5                  : 0.36248096298535315\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_mAP@.5:.95              : 0.2552313601201339\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_precision               : 0.33230793208863385\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_recall                  : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_support                 : 10\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_true_positives          : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_f1                        : 0.19273458172529906\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_false_positives           : 30.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_mAP@.5                    : 0.15021709744739098\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_mAP@.5:.95                : 0.08032785778160076\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_precision                 : 0.251448029225807\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_recall                    : 0.15625\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_support                   : 64\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_true_positives            : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_f1              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_false_positives : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_mAP@.5          : 0.1977117715617716\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_mAP@.5:.95      : 0.11476129113906128\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_precision       : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_recall          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_support         : 18\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_true_positives  : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_f1               : 0.30517494794675193\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_false_positives  : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_mAP@.5           : 0.16701582867783984\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_mAP@.5:.95       : 0.1381205772811918\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_precision        : 0.457762421920128\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_recall           : 0.228881210960064\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_support          : 4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_true_positives   : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [100]          : (0.03233666718006134, 0.0756409764289856)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [100]          : (0.005716758780181408, 0.06198172643780708)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [100]          : (0.026699766516685486, 0.04558404162526131)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_f1                      : 0.4476130143782439\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_false_positives         : 20.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5                  : 0.42358656007333184\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5:.95              : 0.3392188371946497\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_precision               : 0.4172374546453256\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_recall                  : 0.4827586206896552\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_support                 : 29\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_true_positives          : 14.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [100]            : (0.0498691163957119, 0.059022337198257446)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [100]            : (0.01075054332613945, 0.03092651255428791)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [100]            : (0.02405397593975067, 0.027328992262482643)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_f1                        : 0.6392005245652576\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_false_positives           : 14.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5                    : 0.6847284627889811\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5:.95                : 0.5641646747189502\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_precision                 : 0.5956250490228465\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_recall                    : 0.6896551724137931\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_support                   : 29\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_true_positives            : 20.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [100]                   : (0.0004960000000000005, 0.07006224066390042)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [100]                   : (0.0004960000000000005, 0.009597358229598892)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [100]                   : (0.0004960000000000005, 0.009597358229598892)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : k_v_o_replay_baseline\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/exp-100epoch/5fb83cf34fb947429b674b7a1cd8bde9\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     DER_enable          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     DER_old_model       : []\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_enable          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_lambda          : []\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_temperature     : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Old_models          : []\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.225\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/k_v_o_replay_baseline\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (2.30 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Please wait for assets to finish uploading (timeout is 10800 seconds)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Still uploading 2 file(s), remaining 423.30 KB/645.50 KB\n"
     ]
    }
   ],
   "source": [
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_DER.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_openimages.yaml \\\n",
    "--data data/openimages_k_v.yaml \\\n",
    "--epochs 50 \\\n",
    "--weights ./runs/train/increment_VOC_plain/weights/last.pt \\\n",
    "--name k_v_o_replay_baseline \\\n",
    "\n",
    "\"\"\"\n",
    "!{command}\n",
    "#43分钟"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "8eedd437-97ac-4e9d-8c76-c6b275e55ab6",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/openimages.yaml, weights=['runs/train/k_v_o_replay_baseline/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 7330b3e4 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/test.cache... 600 ima\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        600       1621      0.581      0.426      0.455      0.324\n",
      "                   car        600        113       0.57      0.487      0.455      0.323\n",
      "                   van        600          6       0.25      0.167      0.102     0.0887\n",
      "                 truck        600         17      0.962      0.529      0.717      0.606\n",
      "                person        600       1131      0.499      0.355      0.345      0.188\n",
      "               bicycle        600         43      0.821      0.395      0.518      0.347\n",
      "                  bird        600         61      0.593      0.557      0.528      0.401\n",
      "                  boat        600         82       0.74      0.451       0.52      0.331\n",
      "                bottle        600          1          0          0     0.0237    0.00864\n",
      "                   bus        600          3      0.371      0.333      0.337      0.304\n",
      "                   cat        600          5          1      0.773      0.809      0.435\n",
      "                 chair        600         12      0.275      0.333      0.243      0.163\n",
      "                   dog        600         25      0.737       0.92      0.843      0.606\n",
      "                 horse        600         37      0.697      0.703      0.677      0.449\n",
      "                 sheep        600          8      0.606      0.625       0.56      0.436\n",
      "                 train        600          2          1          0      0.084      0.057\n",
      "             billboard        600          3          0          0     0.0643     0.0514\n",
      "                rabbit        600          1          0          0      0.199      0.179\n",
      "                monkey        600         16      0.788       0.75      0.845      0.547\n",
      "                   pig        600          7      0.597      0.857      0.722      0.559\n",
      "                   toy        600         42      0.481      0.133       0.17     0.0967\n",
      "         traffic light        600          5          1          0      0.245      0.152\n",
      "          traffic sign        600          1      0.791          1      0.995      0.796\n",
      "Speed: 0.1ms pre-process, 2.9ms inference, 1.1ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp274\u001b[0m\n",
      "openimages\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_VOC.yaml, weights=['runs/train/k_v_o_replay_baseline/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 7330b3e4 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 image\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.782      0.646      0.724      0.477\n",
      "                   car       4952       1201      0.823      0.799      0.868      0.646\n",
      "                person       4952       4528      0.757      0.735      0.784      0.487\n",
      "             aeroplane       4952        285      0.912      0.583      0.787      0.472\n",
      "               bicycle       4952        337      0.821      0.761      0.823      0.558\n",
      "                  bird       4952        459      0.823      0.623       0.72      0.452\n",
      "                  boat       4952        263      0.608      0.574      0.595      0.336\n",
      "                bottle       4952        469      0.809      0.516      0.604      0.393\n",
      "                   bus       4952        213      0.829       0.73       0.81      0.641\n",
      "                   cat       4952        358      0.844      0.749      0.828       0.56\n",
      "                 chair       4952        756      0.724      0.435      0.542      0.328\n",
      "                   cow       4952        244       0.79       0.74      0.797      0.556\n",
      "           diningtable       4952        206      0.716       0.55      0.619      0.388\n",
      "                   dog       4952        489      0.813      0.663      0.776      0.512\n",
      "                 horse       4952        348      0.892      0.773      0.851      0.598\n",
      "             motorbike       4952        325      0.852      0.674      0.788       0.49\n",
      "           pottedplant       4952        480      0.613      0.435      0.463      0.236\n",
      "                 sheep       4952        242      0.715       0.64      0.684      0.469\n",
      "                  sofa       4952        239      0.653       0.63      0.661      0.463\n",
      "                 train       4952        282      0.826      0.741      0.811      0.514\n",
      "             tvmonitor       4952        308      0.822      0.571      0.678      0.448\n",
      "Speed: 0.1ms pre-process, 1.5ms inference, 0.7ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp275\u001b[0m\n",
      "Voc\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_kitti.yaml, weights=['runs/train/k_v_o_replay_baseline/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 7330b3e4 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198       0.72      0.666      0.707      0.427\n",
      "                   car       2244       8711      0.838      0.865      0.912      0.642\n",
      "                   van       2244        861      0.732      0.664      0.717      0.471\n",
      "                 truck       2244        333      0.727      0.823      0.855      0.597\n",
      "                  tram       2244        138      0.722      0.862      0.819      0.468\n",
      "                person       2244       1286      0.789      0.615      0.684      0.346\n",
      "        person_sitting       2244         89      0.516      0.416      0.439      0.222\n",
      "               cyclist       2244        496      0.728      0.637       0.69      0.354\n",
      "                  misc       2244        284       0.71      0.449      0.538      0.317\n",
      "Speed: 0.0ms pre-process, 0.9ms inference, 0.9ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp276\u001b[0m\n",
      "kitti\n"
     ]
    }
   ],
   "source": [
    "# 1e-4 1e-3\n",
    "model = f'runs/train/k_v_o_replay_baseline/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/openimages.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'openimages' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "\n",
    "# Voc\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/val_VOC.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Voc' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "# kitti\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/val_kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'kitti' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "560555ae-d54b-432a-a310-6abe4fff5bc8",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7826b990-f87a-4ad2-91a8-d9d943c35777",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fcf3cf30-367b-40bb-a32a-c01725a5248a",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "4790ce98-b9ef-4090-86f1-600f4f546774",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_DER: \u001b[0mweights=./runs/train/der_replay/weights/last.pt, cfg=models/yolov5s_openimages.yaml, data=data/openimages_k_v.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=50, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=k_v_o_replay_DER, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=False, Lwf_lambda=[], Lwf_temperature=1.0, Old_models=[], DER_enable=True, DER_old_model=['./runs/train/fog_02/weights/last.pt', './runs/train/der_replay/weights/last.pt']\n",
      "Command 'git fetch ultralytics' timed out after 5 seconds\n",
      "YOLOv5 🚀 7330b3e4 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/exp-100epoch/8377e6c101394038a261134be4d63a4c\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35067  models.yolo_DerTest.Detect              [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "Model summary: 217 layers, 7041211 parameters, 7041211 gradients, 16.0 GFLOPs\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83613  models.yolo_DerTest.Detect              [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "Model summary: 217 layers, 7089757 parameters, 7089757 gradients, 16.2 GFLOPs\n",
      "\n",
      "extractors长度： 2\n",
      "首次创建 extractors\n",
      "成功拼接 extractors\n",
      "extractors共有模型个数： 3\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1    330999  models.yolo_DerTest.Detect              [36, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [384, 768, 1536]]\n",
      "已知类别： 26\n",
      "YOLOv5s_openimages summary: 656 layers, 21511269 parameters, 7380301 gradients, 99.7 GFLOPs\n",
      "\n",
      "Transferred 1059/1433 items from runs/train/der_replay/weights/last.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 171 weight(decay=0.0), 207 weight(decay=0.0005), 183 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/kitti_old.cache... 7701 ima\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/val.cache... 1200 imag\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.23 anchors/target, 0.999 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/k_v_o_replay_DER/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/k_v_o_replay_DER\u001b[0m\n",
      "Starting training for 50 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/49       9.2G     0.0687    0.03876    0.05374        105        640:  fatal: unable to access 'https://github.com/ultralytics/yolov5/': GnuTLS recv error (-110): The TLS connection was non-properly terminated.\n",
      "       0/49       9.2G    0.05611    0.03398    0.03773         29        640: 1\n",
      "tensor([1.08475], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.781      0.164      0.171     0.0974\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/49      11.3G    0.05549    0.03078    0.02441         17        640: 1\n",
      "tensor([1.08857], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.626        0.2      0.218      0.113\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/49      11.3G    0.05217    0.03139    0.01798         50        640: 1\n",
      "tensor([1.04988], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.506      0.349      0.316      0.199\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/49      11.3G    0.04858    0.03262    0.01477         23        640: 1\n",
      "tensor([1.03214], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.512      0.345      0.331      0.204\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/49      11.3G    0.04554    0.03124    0.01385         22        640: 1\n",
      "tensor([0.79724], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.466      0.399      0.331      0.209\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/49      11.3G    0.04463    0.03158    0.01243         38        640: 1\n",
      "tensor([1.00883], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.539      0.332      0.331      0.201\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/49      11.3G    0.04412    0.03186    0.01193         22        640: 1\n",
      "tensor([1.00337], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.508       0.36      0.348      0.217\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/49      11.3G     0.0444    0.03133    0.01207         14        640: 1\n",
      "tensor([0.93206], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.482      0.381      0.353      0.222\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/49      11.3G    0.04228    0.03094    0.01068         30        640: 1\n",
      "tensor([0.85179], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.516      0.352      0.355      0.234\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/49      11.3G    0.04161    0.03055    0.01051         20        640: 1\n",
      "tensor([0.62587], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.47      0.393      0.365      0.246\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/49      11.3G    0.04039     0.0301   0.009826         18        640: 1\n",
      "tensor([0.51184], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.513      0.363      0.389      0.239\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/49      11.3G    0.04091    0.03097       0.01         27        640: 1\n",
      "tensor([0.88173], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.46      0.412      0.393      0.259\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/49      11.3G    0.04034    0.03071   0.009118         36        640: 1\n",
      "tensor([0.98875], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.457      0.419      0.387      0.254\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/49      11.3G    0.03967    0.02929    0.00911         22        640: 1\n",
      "tensor([0.60701], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.52       0.39      0.399      0.265\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/49      11.3G     0.0403    0.02938   0.008453         13        640: 1\n",
      "tensor([0.69444], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233        0.5      0.401      0.413      0.279\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/49      11.3G    0.03861    0.02949   0.008807         30        640: 1\n",
      "tensor([0.68954], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.502      0.397      0.398      0.277\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/49      11.3G    0.03902    0.02978   0.008707         31        640: 1\n",
      "tensor([0.69896], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.583      0.406      0.413      0.279\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/49      11.3G    0.03865    0.02996    0.00815         38        640: 1\n",
      "tensor([0.72849], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.453      0.416      0.402       0.27\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/49      11.3G    0.03806    0.02919     0.0077         47        640: 1\n",
      "tensor([0.98563], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.565      0.392      0.421      0.283\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/49      11.3G    0.03773     0.0283   0.007975         30        640: 1\n",
      "tensor([0.68281], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.492      0.451      0.427      0.272\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/49      11.3G    0.03763    0.02909    0.00744         22        640: 1\n",
      "tensor([0.67713], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.468      0.407      0.417      0.287\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/49      11.3G    0.03724    0.02838   0.007563         34        640: 1\n",
      "tensor([0.79669], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.473      0.423      0.419      0.281\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/49      11.3G    0.03639    0.02842   0.006972         21        640: 1\n",
      "tensor([0.60617], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.533      0.407      0.433      0.294\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/49      11.3G    0.03705     0.0287   0.007101         31        640: 1\n",
      "tensor([0.85535], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.512      0.371      0.424      0.298\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/49      11.3G    0.03619    0.02887   0.006937         28        640: 1\n",
      "tensor([0.84045], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.555      0.405      0.442       0.31\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/49      11.3G    0.03646    0.02879   0.006659         40        640: 1\n",
      "tensor([1.09957], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.518      0.427      0.451      0.306\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/49      11.3G    0.03565    0.02813   0.006541         37        640: 1\n",
      "tensor([0.74771], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.493       0.45      0.443      0.298\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/49      11.3G    0.03504    0.02766   0.006478         31        640: 1\n",
      "tensor([0.68808], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.51      0.451      0.444      0.296\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/49      11.3G     0.0347    0.02793   0.006176         25        640: 1\n",
      "tensor([0.69036], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.595      0.421      0.446      0.306\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/49      11.3G    0.03419    0.02692   0.005864         22        640: 1\n",
      "tensor([0.53704], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.511      0.436      0.442      0.314\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/49      11.3G    0.03472    0.02753     0.0067         21        640: 1\n",
      "tensor([0.81999], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.538      0.424       0.46      0.319\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/49      11.3G    0.03393     0.0264   0.005905         32        640: 1\n",
      "tensor([0.66964], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.559      0.473      0.489      0.328\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/49      11.3G    0.03364    0.02695   0.005798         26        640: 1\n",
      "tensor([0.54950], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.556      0.423      0.467      0.322\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/49      11.3G    0.03372    0.02628   0.006181         22        640: 1\n",
      "tensor([0.75201], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.539      0.476      0.468      0.329\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/49      11.3G    0.03293    0.02602   0.005406         25        640: 1\n",
      "tensor([0.59832], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.538      0.413      0.461      0.317\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/49      11.3G    0.03242    0.02587   0.005366         21        640: 1\n",
      "tensor([0.51798], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.531      0.458      0.462      0.317\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/49      11.3G    0.03218    0.02612   0.005315         42        640: 1\n",
      "tensor([0.60043], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.517      0.428      0.437      0.309\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/49      11.3G    0.03175     0.0252   0.004878         22        640: 1\n",
      "tensor([0.57225], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.577      0.443      0.454      0.321\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/49      11.3G    0.03138     0.0255    0.00531         27        640: 1\n",
      "tensor([0.79119], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.55      0.428      0.462      0.336\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/49      11.3G    0.03082     0.0254   0.004971         41        640: 1\n",
      "tensor([0.60892], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.569      0.402       0.44      0.311\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/49      11.3G    0.03135    0.02588   0.004799         28        640: 1\n",
      "tensor([0.67362], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.549      0.437      0.466      0.333\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/49      11.3G     0.0307    0.02488   0.004636         29        640: 1\n",
      "tensor([0.52658], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233        0.6      0.403      0.447      0.319\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/49      11.3G    0.03052    0.02479   0.004822         21        640: 1\n",
      "tensor([0.62888], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.494      0.476      0.454      0.327\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/49      11.3G    0.02994    0.02453   0.004511         29        640: 1\n",
      "tensor([0.58088], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.542      0.434      0.464      0.328\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/49      11.3G    0.03016    0.02563   0.004541         39        640: 1\n",
      "tensor([0.99437], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.522      0.443      0.462      0.334\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/49      11.3G    0.02974    0.02424   0.004187         31        640: 1\n",
      "tensor([0.65640], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.559      0.427      0.459      0.332\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/49      11.3G    0.03006    0.02441   0.004421         45        640: 1\n",
      "tensor([0.98160], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.539      0.445       0.46      0.335\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/49      11.3G    0.02953    0.02381   0.004175         19        640: 1\n",
      "tensor([0.40993], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.59      0.415      0.459      0.334\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/49      11.3G    0.02867    0.02332   0.004057         27        640: 1\n",
      "tensor([0.53658], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.541      0.434      0.464      0.338\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/49      11.3G    0.02895    0.02383   0.004105         40        640: 1\n",
      "tensor([0.70324], device='cuda:0', grad_fn=<AddBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.552      0.443      0.469      0.338\n",
      "\n",
      "50 epochs completed in 1.643 hours.\n",
      "Optimizer stripped from runs/train/k_v_o_replay_DER/weights/last.pt, 43.8MB\n",
      "Optimizer stripped from runs/train/k_v_o_replay_DER/weights/best.pt, 43.8MB\n",
      "\n",
      "Validating runs/train/k_v_o_replay_DER/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 599 layers, 21501765 parameters, 0 gradients, 99.0 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.55      0.443      0.469      0.338\n",
      "                   car       1200        287      0.678      0.631      0.619      0.432\n",
      "                   van       1200         29      0.636      0.621      0.616      0.482\n",
      "                 truck       1200         29      0.543      0.492      0.556      0.409\n",
      "                person       1200       2264      0.448      0.383      0.324      0.175\n",
      "               bicycle       1200         54      0.672      0.426      0.495      0.339\n",
      "                  bird       1200        136      0.612      0.676      0.628      0.421\n",
      "                  boat       1200        145      0.663      0.428      0.512      0.291\n",
      "                bottle       1200         31          0          0   0.000999   9.99e-05\n",
      "                   bus       1200         15      0.679        0.8      0.792      0.687\n",
      "                   cat       1200          1          0          0      0.124     0.0622\n",
      "                 chair       1200         21      0.226      0.429      0.287      0.165\n",
      "                   dog       1200         42      0.758      0.595      0.662      0.434\n",
      "                 horse       1200         44      0.732      0.705      0.751      0.535\n",
      "                 sheep       1200         10      0.474        0.5      0.388      0.277\n",
      "             billboard       1200          4          0          0     0.0294    0.00294\n",
      "                rabbit       1200         11          1      0.533       0.71      0.602\n",
      "                monkey       1200         18      0.815      0.889      0.894      0.689\n",
      "                   pig       1200          6      0.791      0.667      0.691      0.566\n",
      "                   toy       1200         64      0.618      0.172      0.258      0.146\n",
      "         traffic light       1200         18      0.814      0.111      0.253      0.145\n",
      "          traffic sign       1200          4      0.386       0.25      0.258      0.229\n",
      "Results saved to \u001b[1mruns/train/k_v_o_replay_DER\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : k_v_o_replay_DER\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/exp-100epoch/8377e6c101394038a261134be4d63a4c\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_f1                    : 0.5213535998853924\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_false_positives       : 11.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5                : 0.4949506139563807\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5:.95            : 0.33926069075347687\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_precision             : 0.671888621028277\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_recall                : 0.42592592592592593\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_support               : 54\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_true_positives        : 23.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_f1                  : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_false_positives     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_mAP@.5              : 0.029371794871794872\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_mAP@.5:.95          : 0.002937179487179487\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_precision           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_recall              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_support             : 4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_true_positives      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_f1                       : 0.6428170693216431\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_false_positives          : 58.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_mAP@.5                   : 0.6277818754896178\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_mAP@.5:.95               : 0.42120853307855277\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_precision                : 0.612353300830122\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_recall                   : 0.6764705882352942\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_support                  : 136\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_true_positives           : 92.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_f1                       : 0.5200381995777925\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_false_positives          : 31.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_mAP@.5                   : 0.5117360644184489\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_mAP@.5:.95               : 0.2908930229810717\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_precision                : 0.6634988364868105\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_recall                   : 0.42758620689655175\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_support                  : 145\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_true_positives           : 62.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_f1                     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_false_positives        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_mAP@.5                 : 0.0009990828978855588\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_mAP@.5:.95             : 9.990828978855587e-05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_precision              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_recall                 : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_support                : 31\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_true_positives         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_f1                        : 0.734413487552823\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_false_positives           : 6.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5                    : 0.7915841389003153\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5:.95                : 0.687163242575229\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_precision                 : 0.6787661101386591\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_recall                    : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_support                   : 15\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_true_positives            : 12.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_f1                        : 0.6532690022030477\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_false_positives           : 86.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5                    : 0.618528705166169\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5:.95                : 0.4317025527208104\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_precision                 : 0.6775570010728487\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_recall                    : 0.6306620209059234\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_support                   : 287\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_true_positives            : 181.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_f1                        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_false_positives           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_mAP@.5                    : 0.124375\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_mAP@.5:.95                : 0.06218749999999999\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_precision                 : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_recall                    : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_support                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_true_positives            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_f1                      : 0.2957806521957995\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_false_positives         : 31.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_mAP@.5                  : 0.2865625416645342\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_mAP@.5:.95              : 0.16549133840535546\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_precision               : 0.2258134508134508\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_recall                  : 0.42857142857142855\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_support                 : 21\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_true_positives          : 9.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_f1                        : 0.6668819989412099\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_false_positives           : 8.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_mAP@.5                    : 0.6619537209523707\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_mAP@.5:.95                : 0.4336057884794555\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_precision                 : 0.7581321130294076\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_recall                    : 0.5952380952380952\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_support                   : 42\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_true_positives            : 25.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_f1                      : 0.7182227418813618\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_false_positives         : 11.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_mAP@.5                  : 0.7509154225328708\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_mAP@.5:.95              : 0.5353581524166579\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_precision               : 0.732441574472204\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_recall                  : 0.7045454545454546\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_support                 : 44\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_true_positives          : 31.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [2405]                   : (1.243589162826538, 10.229164123535156)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [100]         : (0.17054586517947823, 0.4888941391512859)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [100]    : (0.09739356470936211, 0.33797998161832465)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [100]       : (0.4526294477173048, 0.7809319700620763)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [100]          : (0.1641745539897199, 0.4759465501986557)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_f1                     : 0.8502132595209381\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_false_positives        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_mAP@.5                 : 0.8944487179487179\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_mAP@.5:.95             : 0.688928171738518\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_precision              : 0.8147628598325747\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_recall                 : 0.8888888888888888\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_support                : 18\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_true_positives         : 16.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_f1                     : 0.41329444833323525\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_false_positives        : 1067.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5                 : 0.3239621155092418\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5:.95             : 0.1753154623934643\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_precision              : 0.448475209374819\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_recall                 : 0.3832317299631788\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_support                : 2264\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_true_positives         : 868.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_f1                        : 0.7236460821101666\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_false_positives           : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_mAP@.5                    : 0.6913480870217553\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_mAP@.5:.95                : 0.5661493660915229\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_precision                 : 0.7912757244618704\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_recall                    : 0.6666666666666666\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_support                   : 6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_true_positives            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_f1                     : 0.6950386496584046\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_false_positives        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_mAP@.5                 : 0.7103230769230771\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_mAP@.5:.95             : 0.6015258333333334\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_precision              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_recall                 : 0.5326124405726396\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_support                : 11\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_true_positives         : 6.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_f1                      : 0.4865998578393584\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_false_positives         : 6.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_mAP@.5                  : 0.3880181161575729\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_mAP@.5:.95              : 0.27724177274403206\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_precision               : 0.47389922389922384\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_recall                  : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_support                 : 10\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_true_positives          : 5.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_f1                        : 0.2689327368589231\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_false_positives           : 7.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_mAP@.5                    : 0.2583581274335664\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_mAP@.5:.95                : 0.14620247511556408\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_precision                 : 0.6178094761428093\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_recall                    : 0.171875\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_support                   : 64\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_true_positives            : 11.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_f1              : 0.19553124806712172\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_false_positives : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_mAP@.5          : 0.2528509307590804\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_mAP@.5:.95      : 0.14549223224116453\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_precision       : 0.8139715734402505\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_recall          : 0.1111111111111111\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_support         : 18\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_true_positives  : 2.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_f1               : 0.30359138032182525\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_false_positives  : 2.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_mAP@.5           : 0.2581140210795273\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_mAP@.5:.95       : 0.22949712551900353\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_precision        : 0.3864282800053213\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_recall           : 0.25\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_support          : 4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_true_positives   : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [100]          : (0.02867359109222889, 0.05610783025622368)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [100]          : (0.004056769888848066, 0.03773273155093193)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [100]          : (0.023323794826865196, 0.03398336470127106)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_f1                      : 0.5164280082513103\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_false_positives         : 12.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5                  : 0.5561827953119093\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5:.95              : 0.40861552584461414\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_precision               : 0.5431484818151485\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_recall                  : 0.492213316765041\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_support                 : 29\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_true_positives          : 14.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [100]            : (0.0495649091899395, 0.062437597662210464)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [100]            : (0.009998160414397717, 0.025581520050764084)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [100]            : (0.02316690795123577, 0.027967778965830803)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_f1                        : 0.6283606459023462\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_false_positives           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5                    : 0.6164069408717423\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5:.95                : 0.48181018918411017\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_precision                 : 0.6362236180585934\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_recall                    : 0.6206896551724138\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_support                   : 29\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_true_positives            : 18.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [100]                   : (0.0004960000000000005, 0.07006224066390042)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [100]                   : (0.0004960000000000005, 0.009597358229598892)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [100]                   : (0.0004960000000000005, 0.009597358229598892)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : k_v_o_replay_DER\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/exp-100epoch/8377e6c101394038a261134be4d63a4c\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     DER_enable          : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     DER_old_model       : ['./runs/train/fog_02/weights/last.pt', './runs/train/der_replay/weights/last.pt']\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_enable          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_lambda          : []\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_temperature     : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Old_models          : []\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bbox_interval       : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cfg                 : models/yolov5s_openimages.yaml\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.225\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     data                : data/openimages_k_v.yaml\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     epochs              : 50\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                : k_v_o_replay_DER\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/k_v_o_replay_DER\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weights             : ./runs/train/der_replay/weights/last.pt\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (2.33 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     model graph                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Please wait for assets to finish uploading (timeout is 10800 seconds)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m All assets have been sent, waiting for delivery confirmation\n"
     ]
    }
   ],
   "source": [
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_DER.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_openimages.yaml \\\n",
    "--data data/openimages_k_v.yaml \\\n",
    "--epochs 50 \\\n",
    "--weights ./runs/train/der_replay/weights/last.pt \\\n",
    "--name k_v_o_replay_DER \\\n",
    "--DER_enable \\\n",
    "--DER_old_model \\\n",
    "   ./runs/train/fog_02/weights/last.pt \\\n",
    "   ./runs/train/der_replay/weights/last.pt \\\n",
    "\"\"\"\n",
    "!{command}\n",
    "#43分钟"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "c3948d85-e245-4006-ac1c-19fb4409f55b",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/openimages.yaml, weights=['runs/train/k_v_o_replay_DER/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 7330b3e4 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 599 layers, 21501765 parameters, 0 gradients, 99.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/test.cache... 600 ima\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        600       1621      0.619      0.478      0.501      0.372\n",
      "                   car        600        113      0.546        0.5      0.455      0.322\n",
      "                   van        600          6      0.483      0.167      0.188      0.169\n",
      "                 truck        600         17      0.881      0.588      0.688      0.541\n",
      "                person        600       1131      0.503       0.36      0.344      0.181\n",
      "               bicycle        600         43      0.735      0.419      0.578      0.394\n",
      "                  bird        600         61      0.571      0.524      0.561      0.431\n",
      "                  boat        600         82        0.9       0.44      0.551      0.335\n",
      "                bottle        600          1          0          0     0.0711      0.064\n",
      "                   bus        600          3       0.37      0.667      0.624      0.571\n",
      "                   cat        600          5      0.858        0.6      0.762      0.373\n",
      "                 chair        600         12       0.24       0.25      0.197      0.131\n",
      "                   dog        600         25      0.761       0.92      0.791       0.64\n",
      "                 horse        600         37      0.761      0.773      0.768       0.47\n",
      "                 sheep        600          8      0.416      0.625      0.644      0.477\n",
      "                 train        600          2          1          0      0.141     0.0985\n",
      "             billboard        600          3          1          0          0          0\n",
      "                rabbit        600          1      0.386          1      0.995      0.995\n",
      "                monkey        600         16      0.781       0.75      0.832      0.588\n",
      "                   pig        600          7      0.833      0.714      0.855      0.702\n",
      "                   toy        600         42      0.232     0.0238       0.11     0.0589\n",
      "         traffic light        600          5      0.904        0.2      0.375      0.244\n",
      "          traffic sign        600          1      0.448          1      0.497      0.398\n",
      "Speed: 0.1ms pre-process, 5.1ms inference, 0.9ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp281\u001b[0m\n",
      "openimages\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_VOC.yaml, weights=['runs/train/k_v_o_replay_DER/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 7330b3e4 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 599 layers, 21501765 parameters, 0 gradients, 99.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 image\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.789      0.682      0.753      0.502\n",
      "                   car       4952       1201      0.826      0.847      0.887      0.668\n",
      "                person       4952       4528      0.755      0.756      0.796      0.503\n",
      "             aeroplane       4952        285      0.889      0.591      0.781      0.456\n",
      "               bicycle       4952        337      0.872       0.79      0.866      0.583\n",
      "                  bird       4952        459      0.819      0.645      0.737      0.461\n",
      "                  boat       4952        263      0.638      0.608      0.602      0.333\n",
      "                bottle       4952        469      0.747      0.544      0.624      0.392\n",
      "                   bus       4952        213      0.842      0.751      0.838      0.674\n",
      "                   cat       4952        358      0.858      0.816      0.853      0.596\n",
      "                 chair       4952        756      0.749       0.52      0.601      0.365\n",
      "                   cow       4952        244      0.792      0.718       0.81      0.571\n",
      "           diningtable       4952        206      0.695      0.576      0.652      0.421\n",
      "                   dog       4952        489      0.835      0.683      0.803      0.538\n",
      "                 horse       4952        348      0.904      0.839      0.892      0.643\n",
      "             motorbike       4952        325      0.848      0.685      0.809      0.514\n",
      "           pottedplant       4952        480      0.694      0.442      0.511      0.254\n",
      "                 sheep       4952        242      0.696      0.718      0.722      0.509\n",
      "                  sofa       4952        239      0.668      0.707      0.704      0.497\n",
      "                 train       4952        282      0.854      0.766      0.851      0.584\n",
      "             tvmonitor       4952        308      0.798      0.642      0.723      0.486\n",
      "Speed: 0.1ms pre-process, 3.8ms inference, 0.7ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp282\u001b[0m\n",
      "Voc\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_kitti.yaml, weights=['runs/train/k_v_o_replay_DER/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 7330b3e4 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 599 layers, 21501765 parameters, 0 gradients, 99.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.849      0.741      0.818       0.53\n",
      "                   car       2244       8711      0.917      0.882      0.942      0.714\n",
      "                   van       2244        861      0.899      0.813      0.875      0.626\n",
      "                 truck       2244        333       0.92      0.904      0.946      0.721\n",
      "                  tram       2244        138      0.835      0.862      0.915      0.592\n",
      "                person       2244       1286      0.867      0.649      0.752      0.401\n",
      "        person_sitting       2244         89      0.646       0.43      0.551      0.251\n",
      "               cyclist       2244        496      0.847      0.701        0.8      0.447\n",
      "                  misc       2244        284       0.86      0.687      0.765      0.485\n",
      "Speed: 0.0ms pre-process, 2.1ms inference, 0.7ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp283\u001b[0m\n",
      "kitti\n"
     ]
    }
   ],
   "source": [
    "# 1e-4 1e-3\n",
    "model = f'runs/train/k_v_o_replay_DER/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/openimages.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'openimages' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "\n",
    "# Voc\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/val_VOC.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Voc' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "# kitti\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/val_kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'kitti' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2600ceb6-d086-4355-8541-a82487348c3b",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0c2b3f05-2dd4-418b-8e74-f2a5a74c88b2",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5b951f32-0864-4e32-bfac-541be7277f45",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "39864ddb-0083-4508-b422-cdb393b66a31",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "e91cadb7-c0a1-4818-9a87-6b44a4cf7dbb",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_LwfPro: \u001b[0mweights=./runs/train/increment_VOC_plain/weights/last.pt, cfg=models/yolov5s_openimages.yaml, data=data/openimages_k_v.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=80, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=replay_k_v_2oldmodels_openimages, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=True, Lwf_lambda=[0.0001, 0.0005], Lwf_temperature=1.0, Old_models=['./runs/train/increment_VOC_plain/weights/last.pt', './runs/train/fog_02/weights/last.pt']\n",
      "Command 'git fetch ultralytics' timed out after 5 seconds\n",
      "YOLOv5 🚀 b2fc1a21 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/exp-100epoch/23518c5d6afa4511b2714b00d6748d12\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1    110583  models.yolo.Detect                      [36, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7116727 parameters, 7116727 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/355 items from runs/train/increment_VOC_plain/weights/last.pt\n",
      "Overriding model.yaml nc=36 with nc=26\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83613  models.yolo.Detect                      [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7089757 parameters, 7089757 gradients, 16.2 GFLOPs\n",
      "\n",
      "Overriding model.yaml nc=36 with nc=8\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35067  models.yolo.Detect                      [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7041211 parameters, 7041211 gradients, 16.0 GFLOPs\n",
      "\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 66 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/kitti_old.cache... 7701 ima\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/val.cache... 1200 imag\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.23 anchors/target, 0.999 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/replay_k_v_2oldmodels_openimages/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/replay_k_v_2oldmodels_openimages\u001b[0m\n",
      "Starting training for 80 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/79      3.68G    0.07716    0.04571    0.05726         29        640: 1\n",
      "tensor([10.51310], device='cuda:0', grad_fn=<AddBackward0>) tensor(16331.36914, device='cuda:0', grad_fn=<AddBackward0>), tensor(16439.09180, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   fatal: unable to access 'https://github.com/ultralytics/yolov5/': GnuTLS recv error (-110): The TLS connection was non-properly terminated.\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.648      0.162      0.137     0.0717\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/79      5.79G    0.05627    0.03754    0.03305         17        640: 1\n",
      "tensor([8.77668], device='cuda:0', grad_fn=<AddBackward0>) tensor(14658.12109, device='cuda:0', grad_fn=<AddBackward0>), tensor(13367.02148, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.593      0.223      0.199      0.119\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/79       5.8G    0.05254    0.03684    0.02886         50        640: 1\n",
      "tensor([7.93701], device='cuda:0', grad_fn=<AddBackward0>) tensor(15976.80469, device='cuda:0', grad_fn=<AddBackward0>), tensor(11372.59180, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.643      0.244      0.225      0.127\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/79       5.8G    0.05124    0.03709    0.02705         23        640: 1\n",
      "tensor([8.74217], device='cuda:0', grad_fn=<AddBackward0>) tensor(15348.08691, device='cuda:0', grad_fn=<AddBackward0>), tensor(13098.53906, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.648       0.23      0.245      0.149\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/79       5.8G    0.04965    0.03678    0.02641         22        640: 1\n",
      "tensor([8.87020], device='cuda:0', grad_fn=<AddBackward0>) tensor(16362.27734, device='cuda:0', grad_fn=<AddBackward0>), tensor(13392.97070, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.58      0.248      0.251       0.15\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/79       5.8G    0.04915    0.03651    0.02525         38        640: 1\n",
      "tensor([7.67181], device='cuda:0', grad_fn=<AddBackward0>) tensor(16396.96484, device='cuda:0', grad_fn=<AddBackward0>), tensor(10817.07520, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.504      0.279      0.279      0.159\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/79       5.8G    0.04862     0.0373    0.02455         22        640: 1\n",
      "tensor([8.63672], device='cuda:0', grad_fn=<AddBackward0>) tensor(18887.63477, device='cuda:0', grad_fn=<AddBackward0>), tensor(12262.19238, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.496      0.281      0.281      0.164\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/79       5.8G     0.0477    0.03658    0.02403         14        640: 1\n",
      "tensor([7.09475], device='cuda:0', grad_fn=<AddBackward0>) tensor(18861.67383, device='cuda:0', grad_fn=<AddBackward0>), tensor(9366.56543, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.552      0.261      0.269      0.156\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/79       5.8G     0.0472    0.03629    0.02344         30        640: 1\n",
      "tensor([7.77604], device='cuda:0', grad_fn=<AddBackward0>) tensor(17825.39648, device='cuda:0', grad_fn=<AddBackward0>), tensor(10834.92676, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.516      0.276      0.271      0.154\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/79       5.8G    0.04672    0.03655    0.02316         20        640: 1\n",
      "tensor([7.44589], device='cuda:0', grad_fn=<AddBackward0>) tensor(17918.78906, device='cuda:0', grad_fn=<AddBackward0>), tensor(10393.78125, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.519       0.29      0.288      0.173\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/79       5.8G    0.04651    0.03625    0.02244         18        640: 1\n",
      "tensor([6.83000], device='cuda:0', grad_fn=<AddBackward0>) tensor(19322.33203, device='cuda:0', grad_fn=<AddBackward0>), tensor(9117.55176, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.52      0.288      0.294      0.175\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/79       5.8G    0.04608    0.03641    0.02226         27        640: 1\n",
      "tensor([6.37926], device='cuda:0', grad_fn=<AddBackward0>) tensor(17102.02734, device='cuda:0', grad_fn=<AddBackward0>), tensor(8271.09766, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.521      0.283      0.295      0.172\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/79       5.8G    0.04598    0.03651    0.02198         36        640: 1\n",
      "tensor([7.72120], device='cuda:0', grad_fn=<AddBackward0>) tensor(19964.02930, device='cuda:0', grad_fn=<AddBackward0>), tensor(10249.83594, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.518      0.279      0.294      0.168\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/79       5.8G    0.04538    0.03595    0.02182         22        640: 1\n",
      "tensor([5.90784], device='cuda:0', grad_fn=<AddBackward0>) tensor(17048.60938, device='cuda:0', grad_fn=<AddBackward0>), tensor(7499.69873, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.495      0.289      0.294       0.17\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/79       5.8G    0.04546    0.03666    0.02148         13        640: 1\n",
      "tensor([7.65030], device='cuda:0', grad_fn=<AddBackward0>) tensor(19488.63477, device='cuda:0', grad_fn=<AddBackward0>), tensor(10214.98926, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.482      0.342      0.297      0.176\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/79       5.8G     0.0449    0.03593    0.02136         30        640: 1\n",
      "tensor([6.20210], device='cuda:0', grad_fn=<AddBackward0>) tensor(19223.26953, device='cuda:0', grad_fn=<AddBackward0>), tensor(7635.43848, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.459      0.304      0.291      0.169\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/79       5.8G    0.04487     0.0358    0.02119         31        640: 1\n",
      "tensor([5.74114], device='cuda:0', grad_fn=<AddBackward0>) tensor(19903.57812, device='cuda:0', grad_fn=<AddBackward0>), tensor(6709.12598, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.522      0.295      0.301      0.181\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/79       5.8G    0.04461    0.03604     0.0204         38        640: 1\n",
      "tensor([6.47911], device='cuda:0', grad_fn=<AddBackward0>) tensor(19286.01758, device='cuda:0', grad_fn=<AddBackward0>), tensor(8164.61182, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.546      0.297      0.317      0.186\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/79       5.8G    0.04397    0.03526    0.02025         47        640: 1\n",
      "tensor([6.87703], device='cuda:0', grad_fn=<AddBackward0>) tensor(18170.24414, device='cuda:0', grad_fn=<AddBackward0>), tensor(8784.17871, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.607      0.287      0.325      0.196\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/79       5.8G    0.04394    0.03479     0.0207         30        640: 1\n",
      "tensor([6.18976], device='cuda:0', grad_fn=<AddBackward0>) tensor(17449.41211, device='cuda:0', grad_fn=<AddBackward0>), tensor(7974.00049, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.544      0.301      0.328      0.199\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/79       5.8G    0.04366    0.03515    0.02028         22        640: 1\n",
      "tensor([6.34494], device='cuda:0', grad_fn=<AddBackward0>) tensor(19546.11523, device='cuda:0', grad_fn=<AddBackward0>), tensor(7851.42822, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.52      0.284      0.304      0.181\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/79       5.8G    0.04362    0.03523    0.02003         34        640: 1\n",
      "tensor([5.91140], device='cuda:0', grad_fn=<AddBackward0>) tensor(17574.38086, device='cuda:0', grad_fn=<AddBackward0>), tensor(7303.36670, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.521        0.3      0.319      0.191\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/79       5.8G    0.04335    0.03495    0.01981         21        640: 1\n",
      "tensor([5.97336], device='cuda:0', grad_fn=<AddBackward0>) tensor(17456.75195, device='cuda:0', grad_fn=<AddBackward0>), tensor(7670.90186, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.397      0.358      0.318       0.19\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/79       5.8G    0.04346    0.03524    0.01977         31        640: 1\n",
      "tensor([5.97698], device='cuda:0', grad_fn=<AddBackward0>) tensor(19694.71484, device='cuda:0', grad_fn=<AddBackward0>), tensor(6824.68213, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.425      0.381      0.329        0.2\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/79       5.8G    0.04312    0.03543     0.0196         28        640: 1\n",
      "tensor([7.06826], device='cuda:0', grad_fn=<AddBackward0>) tensor(18181.12109, device='cuda:0', grad_fn=<AddBackward0>), tensor(9301.21191, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233        0.5      0.322      0.305      0.186\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/79       5.8G     0.0431    0.03519    0.01953         40        640: 1\n",
      "tensor([6.58563], device='cuda:0', grad_fn=<AddBackward0>) tensor(17406.40430, device='cuda:0', grad_fn=<AddBackward0>), tensor(8422.47168, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.545      0.288      0.325      0.195\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/79       5.8G    0.04275     0.0349    0.01924         37        640: 1\n",
      "tensor([6.28242], device='cuda:0', grad_fn=<AddBackward0>) tensor(19361.75586, device='cuda:0', grad_fn=<AddBackward0>), tensor(7577.78809, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.503      0.324      0.331      0.192\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/79       5.8G    0.04274    0.03517    0.01899         31        640: 1\n",
      "tensor([7.00882], device='cuda:0', grad_fn=<AddBackward0>) tensor(19302.94141, device='cuda:0', grad_fn=<AddBackward0>), tensor(9298.48438, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.52      0.333      0.319      0.192\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/79       5.8G    0.04219    0.03483    0.01884         25        640: 1\n",
      "tensor([5.65575], device='cuda:0', grad_fn=<AddBackward0>) tensor(17984.95312, device='cuda:0', grad_fn=<AddBackward0>), tensor(6861.65186, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.521      0.314      0.324      0.193\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/79       5.8G     0.0422    0.03481     0.0189         22        640: 1\n",
      "tensor([6.42003], device='cuda:0', grad_fn=<AddBackward0>) tensor(18975.77734, device='cuda:0', grad_fn=<AddBackward0>), tensor(8234.03906, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.44      0.373      0.336      0.208\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/79       5.8G     0.0424    0.03457     0.0187         21        640: 1\n",
      "tensor([7.39847], device='cuda:0', grad_fn=<AddBackward0>) tensor(20355.92578, device='cuda:0', grad_fn=<AddBackward0>), tensor(9362.90625, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.502      0.331       0.33      0.204\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/79       5.8G    0.04197     0.0342    0.01884         32        640: 1\n",
      "tensor([6.59530], device='cuda:0', grad_fn=<AddBackward0>) tensor(18584.36328, device='cuda:0', grad_fn=<AddBackward0>), tensor(8393.51855, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.502      0.353      0.326      0.198\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/79       5.8G    0.04202    0.03472    0.01854         26        640: 1\n",
      "tensor([6.03002], device='cuda:0', grad_fn=<AddBackward0>) tensor(19337.91211, device='cuda:0', grad_fn=<AddBackward0>), tensor(7416.39404, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.541       0.33      0.343      0.211\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/79       5.8G    0.04186    0.03412    0.01886         22        640: 1\n",
      "tensor([6.61236], device='cuda:0', grad_fn=<AddBackward0>) tensor(19584.10742, device='cuda:0', grad_fn=<AddBackward0>), tensor(8340.61914, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.52      0.352      0.336      0.211\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/79       5.8G    0.04162     0.0342    0.01839         25        640: 1\n",
      "tensor([6.23728], device='cuda:0', grad_fn=<AddBackward0>) tensor(19521.05273, device='cuda:0', grad_fn=<AddBackward0>), tensor(7663.79443, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.506      0.363       0.33      0.207\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/79       5.8G    0.04136    0.03412    0.01812         21        640: 1\n",
      "tensor([5.79434], device='cuda:0', grad_fn=<AddBackward0>) tensor(19101.75586, device='cuda:0', grad_fn=<AddBackward0>), tensor(7050.07324, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.446      0.343      0.326      0.196\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/79       5.8G    0.04129    0.03439    0.01799         42        640: 1\n",
      "tensor([5.44780], device='cuda:0', grad_fn=<AddBackward0>) tensor(17095.14844, device='cuda:0', grad_fn=<AddBackward0>), tensor(6592.95264, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.503      0.361      0.346      0.211\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/79       5.8G    0.04112    0.03389    0.01787         22        640: 1\n",
      "tensor([6.36837], device='cuda:0', grad_fn=<AddBackward0>) tensor(18312.72656, device='cuda:0', grad_fn=<AddBackward0>), tensor(8130.78613, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.508      0.336      0.346       0.21\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/79       5.8G    0.04104    0.03406    0.01788         27        640: 1\n",
      "tensor([7.67494], device='cuda:0', grad_fn=<AddBackward0>) tensor(22058.27344, device='cuda:0', grad_fn=<AddBackward0>), tensor(9783.85645, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.51      0.381      0.348      0.212\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/79       5.8G    0.04058    0.03378    0.01775         41        640: 1\n",
      "tensor([6.65567], device='cuda:0', grad_fn=<AddBackward0>) tensor(18327.82422, device='cuda:0', grad_fn=<AddBackward0>), tensor(8752.14062, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.497      0.351      0.337      0.206\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/79       5.8G    0.04079    0.03447    0.01779         28        640: 1\n",
      "tensor([6.62117], device='cuda:0', grad_fn=<AddBackward0>) tensor(18271.75977, device='cuda:0', grad_fn=<AddBackward0>), tensor(8564.35059, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.507      0.381      0.347      0.219\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/79       5.8G    0.04049    0.03353    0.01762         29        640: 1\n",
      "tensor([6.12750], device='cuda:0', grad_fn=<AddBackward0>) tensor(17491.76953, device='cuda:0', grad_fn=<AddBackward0>), tensor(7939.37842, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.497      0.352      0.338      0.213\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/79       5.8G    0.04028    0.03362    0.01768         21        640: 1\n",
      "tensor([6.34398], device='cuda:0', grad_fn=<AddBackward0>) tensor(19232.33984, device='cuda:0', grad_fn=<AddBackward0>), tensor(7917.80859, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.517      0.376      0.348      0.215\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/79       5.8G     0.0403    0.03341    0.01748         29        640: 1\n",
      "tensor([5.44651], device='cuda:0', grad_fn=<AddBackward0>) tensor(19339.85742, device='cuda:0', grad_fn=<AddBackward0>), tensor(6119.65039, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.519      0.358      0.346      0.216\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/79       5.8G    0.04025    0.03385    0.01776         39        640: 1\n",
      "tensor([6.43562], device='cuda:0', grad_fn=<AddBackward0>) tensor(18732.32422, device='cuda:0', grad_fn=<AddBackward0>), tensor(7765.56934, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.496      0.383      0.345      0.216\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/79       5.8G     0.0401     0.0334    0.01741         31        640: 1\n",
      "tensor([6.27929], device='cuda:0', grad_fn=<AddBackward0>) tensor(20099.29883, device='cuda:0', grad_fn=<AddBackward0>), tensor(7454.24805, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.539      0.351      0.359      0.229\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/79       5.8G    0.04017    0.03369    0.01744         45        640: 1\n",
      "tensor([7.17069], device='cuda:0', grad_fn=<AddBackward0>) tensor(20210.14258, device='cuda:0', grad_fn=<AddBackward0>), tensor(8976.35742, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.524      0.382      0.354      0.221\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/79       5.8G    0.04014    0.03366    0.01712         19        640: 1\n",
      "tensor([5.31372], device='cuda:0', grad_fn=<AddBackward0>) tensor(18215.39258, device='cuda:0', grad_fn=<AddBackward0>), tensor(6300.50293, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.515       0.39      0.362      0.227\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/79       5.8G    0.03965    0.03271    0.01696         27        640: 1\n",
      "tensor([5.77708], device='cuda:0', grad_fn=<AddBackward0>) tensor(18553.63672, device='cuda:0', grad_fn=<AddBackward0>), tensor(6985.72021, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.513      0.386      0.364      0.227\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/79       5.8G    0.03971    0.03291    0.01688         40        640: 1\n",
      "tensor([6.09458], device='cuda:0', grad_fn=<AddBackward0>) tensor(17296.58594, device='cuda:0', grad_fn=<AddBackward0>), tensor(7624.05078, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.514      0.399      0.363      0.225\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      50/79       5.8G    0.03918    0.03298    0.01676         40        640: 1\n",
      "tensor([6.24613], device='cuda:0', grad_fn=<AddBackward0>) tensor(18552.57422, device='cuda:0', grad_fn=<AddBackward0>), tensor(7876.74072, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.525      0.357       0.36      0.225\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      51/79       5.8G    0.03937    0.03285    0.01683         67        640: 1\n",
      "tensor([5.24892], device='cuda:0', grad_fn=<AddBackward0>) tensor(16557.61914, device='cuda:0', grad_fn=<AddBackward0>), tensor(6043.95703, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.528      0.393      0.369      0.233\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      52/79       5.8G    0.03911    0.03295    0.01679         30        640: 1\n",
      "tensor([6.83647], device='cuda:0', grad_fn=<AddBackward0>) tensor(19421.72461, device='cuda:0', grad_fn=<AddBackward0>), tensor(8748.99316, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.556      0.363      0.361      0.227\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      53/79       5.8G    0.03922    0.03316    0.01691         35        640: 1\n",
      "tensor([5.84991], device='cuda:0', grad_fn=<AddBackward0>) tensor(19420.25781, device='cuda:0', grad_fn=<AddBackward0>), tensor(6879.03223, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.557      0.328      0.348      0.222\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      54/79       5.8G    0.03881    0.03283    0.01665         28        640: 1\n",
      "tensor([6.99484], device='cuda:0', grad_fn=<AddBackward0>) tensor(20915.15820, device='cuda:0', grad_fn=<AddBackward0>), tensor(8811.77344, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.543      0.352      0.359      0.227\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      55/79       5.8G    0.03881    0.03307    0.01661         23        640: 1\n",
      "tensor([6.17978], device='cuda:0', grad_fn=<AddBackward0>) tensor(17315.38867, device='cuda:0', grad_fn=<AddBackward0>), tensor(8081.61865, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.537      0.362      0.358      0.227\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      56/79       5.8G    0.03877     0.0329    0.01641         50        640: 1\n",
      "tensor([5.32571], device='cuda:0', grad_fn=<AddBackward0>) tensor(18319.42578, device='cuda:0', grad_fn=<AddBackward0>), tensor(5962.60693, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.521      0.372      0.344       0.22\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      57/79       5.8G    0.03889    0.03279    0.01641         39        640: 1\n",
      "tensor([4.89793], device='cuda:0', grad_fn=<AddBackward0>) tensor(15461.03613, device='cuda:0', grad_fn=<AddBackward0>), tensor(5805.40234, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.509      0.368      0.346      0.218\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      58/79       5.8G    0.03853    0.03296    0.01621         40        640: 1\n",
      "tensor([5.33745], device='cuda:0', grad_fn=<AddBackward0>) tensor(18117.23828, device='cuda:0', grad_fn=<AddBackward0>), tensor(6143.68262, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.535       0.35      0.355      0.223\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      59/79       5.8G    0.03852    0.03269    0.01623         23        640: 1\n",
      "tensor([6.46944], device='cuda:0', grad_fn=<AddBackward0>) tensor(19635.05078, device='cuda:0', grad_fn=<AddBackward0>), tensor(8157.82178, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.556      0.343      0.353      0.229\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      60/79       5.8G    0.03846    0.03249    0.01632         41        640: 1\n",
      "tensor([5.91779], device='cuda:0', grad_fn=<AddBackward0>) tensor(18849.96484, device='cuda:0', grad_fn=<AddBackward0>), tensor(6956.71533, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.549      0.363      0.358      0.228\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      61/79       5.8G    0.03838    0.03209    0.01606         47        640: 1\n",
      "tensor([6.52286], device='cuda:0', grad_fn=<AddBackward0>) tensor(20964.34375, device='cuda:0', grad_fn=<AddBackward0>), tensor(7708.17334, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.527      0.356      0.355      0.227\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      62/79       5.8G    0.03811    0.03212    0.01619         39        640: 1\n",
      "tensor([5.22561], device='cuda:0', grad_fn=<AddBackward0>) tensor(16954.33984, device='cuda:0', grad_fn=<AddBackward0>), tensor(6072.60986, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.532      0.354      0.357      0.226\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      63/79       5.8G    0.03815    0.03256    0.01608         33        640: 1\n",
      "tensor([5.62686], device='cuda:0', grad_fn=<AddBackward0>) tensor(18786.69922, device='cuda:0', grad_fn=<AddBackward0>), tensor(6527.55029, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.519      0.356      0.357      0.224\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      64/79       5.8G    0.03779    0.03193    0.01579         32        640: 1\n",
      "tensor([5.22916], device='cuda:0', grad_fn=<AddBackward0>) tensor(19557.53516, device='cuda:0', grad_fn=<AddBackward0>), tensor(5668.07617, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.527       0.37      0.367      0.229\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      65/79       5.8G    0.03791     0.0322    0.01589         39        640: 1\n",
      "tensor([5.21012], device='cuda:0', grad_fn=<AddBackward0>) tensor(20296.96289, device='cuda:0', grad_fn=<AddBackward0>), tensor(5480.28125, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.549      0.361      0.361      0.227\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      66/79       5.8G    0.03765     0.0319    0.01584         33        640: 1\n",
      "tensor([4.97534], device='cuda:0', grad_fn=<AddBackward0>) tensor(17000.41211, device='cuda:0', grad_fn=<AddBackward0>), tensor(5771.30957, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.518      0.372      0.366      0.229\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      67/79       5.8G    0.03764    0.03182      0.016         22        640: 1\n",
      "tensor([6.12633], device='cuda:0', grad_fn=<AddBackward0>) tensor(19234.06836, device='cuda:0', grad_fn=<AddBackward0>), tensor(7792.29199, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.542      0.345      0.364      0.232\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      68/79       5.8G    0.03704    0.03184    0.01542         26        640: 1\n",
      "tensor([5.71688], device='cuda:0', grad_fn=<AddBackward0>) tensor(19139., device='cuda:0', grad_fn=<AddBackward0>), tensor(6804.22559, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.547      0.339      0.362       0.23\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      69/79       5.8G    0.03743    0.03184    0.01564         20        640: 1\n",
      "tensor([5.65670], device='cuda:0', grad_fn=<AddBackward0>) tensor(18433.67188, device='cuda:0', grad_fn=<AddBackward0>), tensor(6945.65283, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.534      0.366      0.362      0.227\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      70/79       5.8G    0.03719    0.03138    0.01557         28        640: 1\n",
      "tensor([5.99934], device='cuda:0', grad_fn=<AddBackward0>) tensor(19546.11328, device='cuda:0', grad_fn=<AddBackward0>), tensor(7052.05859, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.57      0.347      0.368       0.23\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      71/79       5.8G    0.03707    0.03186    0.01538         29        640: 1\n",
      "tensor([6.20828], device='cuda:0', grad_fn=<AddBackward0>) tensor(19525.22070, device='cuda:0', grad_fn=<AddBackward0>), tensor(7640.32764, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.552      0.349       0.37      0.235\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      72/79       5.8G    0.03705      0.032    0.01546         30        640: 1\n",
      "tensor([5.69348], device='cuda:0', grad_fn=<AddBackward0>) tensor(18901.93555, device='cuda:0', grad_fn=<AddBackward0>), tensor(6598.69824, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.534      0.355      0.366      0.232\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      73/79       5.8G    0.03687    0.03187    0.01531         34        640: 1\n",
      "tensor([5.27126], device='cuda:0', grad_fn=<AddBackward0>) tensor(18323.17383, device='cuda:0', grad_fn=<AddBackward0>), tensor(6089.55859, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.538      0.341      0.366      0.231\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      74/79       5.8G     0.0369    0.03172    0.01538         38        640: 1\n",
      "tensor([5.07069], device='cuda:0', grad_fn=<AddBackward0>) tensor(17454.12891, device='cuda:0', grad_fn=<AddBackward0>), tensor(5660.03223, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.533       0.35      0.359      0.229\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      75/79       5.8G    0.03679    0.03125    0.01531         20        640: 1\n",
      "tensor([6.20015], device='cuda:0', grad_fn=<AddBackward0>) tensor(20137.13086, device='cuda:0', grad_fn=<AddBackward0>), tensor(7534.67871, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.547      0.344      0.362      0.233\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      76/79       5.8G    0.03666    0.03147    0.01518         26        640: 1\n",
      "tensor([4.78928], device='cuda:0', grad_fn=<AddBackward0>) tensor(16930.71289, device='cuda:0', grad_fn=<AddBackward0>), tensor(5449.88721, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.498      0.349       0.36      0.233\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      77/79       5.8G    0.03673    0.03161     0.0152         30        640: 1\n",
      "tensor([5.58797], device='cuda:0', grad_fn=<AddBackward0>) tensor(18629.31836, device='cuda:0', grad_fn=<AddBackward0>), tensor(6372.32715, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.544      0.347      0.362      0.233\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      78/79       5.8G    0.03686    0.03153    0.01526         56        640: 1\n",
      "tensor([5.15943], device='cuda:0', grad_fn=<AddBackward0>) tensor(17148.90625, device='cuda:0', grad_fn=<AddBackward0>), tensor(5639.89502, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.549      0.351      0.364      0.232\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      79/79       5.8G    0.03635    0.03163    0.01493         50        640: 1\n",
      "tensor([5.40276], device='cuda:0', grad_fn=<AddBackward0>) tensor(17088.87109, device='cuda:0', grad_fn=<AddBackward0>), tensor(6454.62891, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.545      0.361      0.369      0.235\n",
      "\n",
      "80 epochs completed in 1.489 hours.\n",
      "Optimizer stripped from runs/train/replay_k_v_2oldmodels_openimages/weights/last.pt, 14.6MB\n",
      "Optimizer stripped from runs/train/replay_k_v_2oldmodels_openimages/weights/best.pt, 14.6MB\n",
      "\n",
      "Validating runs/train/replay_k_v_2oldmodels_openimages/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.554      0.349       0.37      0.235\n",
      "                   car       1200        287      0.662      0.564      0.531      0.344\n",
      "                   van       1200         29      0.398      0.241      0.274      0.216\n",
      "                 truck       1200         29      0.257      0.207      0.222      0.128\n",
      "                person       1200       2264      0.457      0.335      0.306      0.152\n",
      "               bicycle       1200         54      0.579      0.444      0.461      0.266\n",
      "                  bird       1200        136      0.584      0.551      0.534      0.303\n",
      "                  boat       1200        145       0.64      0.414       0.47      0.232\n",
      "                bottle       1200         31          0          0    0.00143    0.00049\n",
      "                   bus       1200         15      0.423      0.733      0.758      0.546\n",
      "                   cat       1200          1          0          0    0.00488    0.00244\n",
      "                 chair       1200         21      0.187      0.333      0.139     0.0694\n",
      "                   dog       1200         42      0.696      0.571      0.609      0.347\n",
      "                 horse       1200         44      0.814      0.596      0.687      0.396\n",
      "                 sheep       1200         10      0.356        0.5      0.362      0.217\n",
      "             billboard       1200          4          1          0     0.0214      0.005\n",
      "                rabbit       1200         11      0.763      0.296      0.643      0.438\n",
      "                monkey       1200         18      0.729      0.833      0.871      0.643\n",
      "                   pig       1200          6      0.835      0.667      0.675      0.489\n",
      "                   toy       1200         64      0.248     0.0469     0.0609     0.0267\n",
      "         traffic light       1200         18          1          0     0.0607     0.0302\n",
      "          traffic sign       1200          4          1          0     0.0833      0.075\n",
      "Results saved to \u001b[1mruns/train/replay_k_v_2oldmodels_openimages\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : replay_k_v_2oldmodels_openimages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/exp-100epoch/23518c5d6afa4511b2714b00d6748d12\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_f1                    : 0.5030437834938553\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_false_positives       : 17.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5                : 0.46063166361288027\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5:.95            : 0.26624655511246265\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_precision             : 0.5794424025601119\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_recall                : 0.4444444444444444\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_support               : 54\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_true_positives        : 24.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_f1                  : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_false_positives     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_mAP@.5              : 0.02138358778625954\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_mAP@.5:.95          : 0.004996983501600591\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_precision           : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_recall              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_support             : 4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_true_positives      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_f1                       : 0.5670600036868617\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_false_positives          : 54.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_mAP@.5                   : 0.5343003555970992\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_mAP@.5:.95               : 0.30346045023259827\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_precision                : 0.5835564481085237\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_recall                   : 0.5514705882352942\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_support                  : 136\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_true_positives           : 75.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_f1                       : 0.5025525655355055\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_false_positives          : 34.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_mAP@.5                   : 0.46961902384273785\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_mAP@.5:.95               : 0.23221409202839965\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_precision                : 0.6397884996397548\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_recall                   : 0.41379310344827586\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_support                  : 145\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_true_positives           : 60.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_f1                     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_false_positives        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_mAP@.5                 : 0.0014304953848295332\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_mAP@.5:.95             : 0.0004898595342535746\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_precision              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_recall                 : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_support                : 31\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_true_positives         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_f1                        : 0.5364175904034031\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_false_positives           : 15.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5                    : 0.7579021676216617\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5:.95                : 0.5464951501599128\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_precision                 : 0.4228683582350975\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_recall                    : 0.7333333333333333\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_support                   : 15\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_true_positives            : 11.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_f1                        : 0.6091800324833971\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_false_positives           : 83.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5                    : 0.5305512510443301\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5:.95                : 0.34383192088485587\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_precision                 : 0.6615958602058172\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_recall                    : 0.5644599303135889\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_support                   : 287\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_true_positives            : 162.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_f1                        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_false_positives           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_mAP@.5                    : 0.004877450980392156\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_mAP@.5:.95                : 0.002438725490196078\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_precision                 : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_recall                    : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_support                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_true_positives            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_f1                      : 0.23962703140616404\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_false_positives         : 30.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_mAP@.5                  : 0.13885951548888703\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_mAP@.5:.95              : 0.0693841921090397\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_precision               : 0.18704511370861931\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_recall                  : 0.3333333333333333\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_support                 : 21\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_true_positives          : 7.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_f1                        : 0.6276064427859381\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_false_positives           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_mAP@.5                    : 0.6089537709333787\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_mAP@.5:.95                : 0.34702767929006406\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_precision                 : 0.6960344798580093\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_recall                    : 0.5714285714285714\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_support                   : 42\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_true_positives            : 24.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_f1                      : 0.6880324176445266\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_false_positives         : 6.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_mAP@.5                  : 0.6865188932624204\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_mAP@.5:.95              : 0.3964826136603672\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_precision               : 0.8137615227167466\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_recall                  : 0.5959551817760773\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_support                 : 44\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_true_positives          : 26.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [3848]                   : (5.391359329223633, 25.073911666870117)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [160]         : (0.13697431383654946, 0.3698635397804883)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [160]    : (0.07166775569146873, 0.23493351963955553)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [160]       : (0.3969316372744585, 0.6484844544494208)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [160]          : (0.16153655473917336, 0.39909139919552283)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_f1                     : 0.7779176954457329\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_false_positives        : 6.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_mAP@.5                 : 0.8712398780550089\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_mAP@.5:.95             : 0.6429661544581367\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_precision              : 0.7294126544126545\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_recall                 : 0.8333333333333334\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_support                : 18\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_true_positives         : 15.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_f1                     : 0.3864044547681745\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_false_positives        : 901.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5                 : 0.3056420483881642\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5:.95             : 0.15179555548477502\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_precision              : 0.4568053169039392\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_recall                 : 0.33480565371024734\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_support                : 2264\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_true_positives         : 758.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_f1                        : 0.7413218787055712\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_false_positives           : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_mAP@.5                    : 0.674505293985132\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_mAP@.5:.95                : 0.4885729849065106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_precision                 : 0.8348057827942885\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_recall                    : 0.6666666666666666\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_support                   : 6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_true_positives            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_f1                     : 0.42606409358950315\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_false_positives        : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_mAP@.5                 : 0.6427890448934007\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_mAP@.5:.95             : 0.43844875628726987\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_precision              : 0.76258702862511\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_recall                 : 0.29561277931838165\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_support                : 11\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_true_positives         : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_f1                      : 0.4160302469838189\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_false_positives         : 9.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_mAP@.5                  : 0.3623405911608159\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_mAP@.5:.95              : 0.2171804608985129\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_precision               : 0.35620872899241696\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_recall                  : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_support                 : 10\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_true_positives          : 5.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_f1                        : 0.07884708026581742\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_false_positives           : 9.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_mAP@.5                    : 0.06091087867892904\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_mAP@.5:.95                : 0.026703831430840902\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_precision                 : 0.2480022004669892\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_recall                    : 0.046875\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_support                   : 64\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_true_positives            : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_f1              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_false_positives : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_mAP@.5          : 0.06074868421052631\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_mAP@.5:.95      : 0.030192610939112484\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_precision       : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_recall          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_support         : 18\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_true_positives  : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_f1               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_false_positives  : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_mAP@.5           : 0.08329756795422032\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_mAP@.5:.95       : 0.07496781115879828\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_precision        : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_recall           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_support          : 4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_true_positives   : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [160]          : (0.03634645789861679, 0.07715698331594467)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [160]          : (0.014928032644093037, 0.05726327374577522)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [160]          : (0.03125128895044327, 0.045706477016210556)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_f1                      : 0.22941306199684985\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_false_positives         : 17.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5                  : 0.2218694113167673\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5:.95              : 0.12797979294356804\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_precision               : 0.2574290095282818\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_recall                  : 0.20689655172413793\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_support                 : 29\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_true_positives          : 6.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [160]            : (0.05308541655540466, 0.06323966383934021)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [160]            : (0.026861118152737617, 0.03741730377078056)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [160]            : (0.02454245649278164, 0.026274103671312332)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_f1                        : 0.30055172050490436\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_false_positives           : 11.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5                    : 0.2742007157430064\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5:.95                : 0.21570745630367086\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_precision                 : 0.39815707827126956\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_recall                    : 0.2413793103448276\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_support                   : 29\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_true_positives            : 7.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [160]                   : (0.00034750000000000026, 0.07006224066390042)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [160]                   : (0.00034750000000000026, 0.009745755532503456)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [160]                   : (0.00034750000000000026, 0.009745755532503456)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : replay_k_v_2oldmodels_openimages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/exp-100epoch/23518c5d6afa4511b2714b00d6748d12\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_enable          : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_lambda          : [0.0001, 0.0005]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_temperature     : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Old_models          : ['./runs/train/increment_VOC_plain/weights/last.pt', './runs/train/fog_02/weights/last.pt']\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bbox_interval       : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cfg                 : models/yolov5s_openimages.yaml\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.225\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     data                : data/openimages_k_v.yaml\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     epochs              : 80\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                : replay_k_v_2oldmodels_openimages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/replay_k_v_2oldmodels_openimages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weights             : ./runs/train/increment_VOC_plain/weights/last.pt\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (2.28 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     model graph                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Please wait for assets to finish uploading (timeout is 10800 seconds)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m All assets have been sent, waiting for delivery confirmation\n"
     ]
    }
   ],
   "source": [
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_LwfPro.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_openimages.yaml \\\n",
    "--data data/openimages_k_v.yaml \\\n",
    "--epochs 80 \\\n",
    "--weights ./runs/train/increment_VOC_plain/weights/last.pt \\\n",
    "--Lwf_enable \\\n",
    "--Lwf_temperature 1.0 \\\n",
    "--Lwf_lambda \\\n",
    "        1e-4 \\\n",
    "        5e-4 \\\n",
    "--Old_models \\\n",
    "        ./runs/train/increment_VOC_plain/weights/last.pt \\\n",
    "        ./runs/train/fog_02/weights/last.pt \\\n",
    "--name replay_k_v_2oldmodels_openimages \\\n",
    "\n",
    "\"\"\"\n",
    "!{command}\n",
    "#43分钟"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "665f2aa4-2d18-4f24-9f00-ce970f05fa8d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/openimages.yaml, weights=['runs/train/replay_k_v_2oldmodels_openimages/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 b2fc1a21 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/test.cache... 600 ima\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        600       1621      0.437      0.411      0.403      0.245\n",
      "                   car        600        113       0.37      0.487        0.3      0.193\n",
      "                   van        600          6          0          0    0.00584    0.00442\n",
      "                 truck        600         17      0.543      0.647      0.527      0.426\n",
      "                person        600       1131      0.383      0.392      0.299      0.147\n",
      "               bicycle        600         43      0.509      0.442      0.481       0.26\n",
      "                  bird        600         61       0.37      0.525      0.364      0.212\n",
      "                  boat        600         82      0.501      0.463      0.424      0.218\n",
      "                bottle        600          1          0          0          0          0\n",
      "                   bus        600          3      0.185      0.333      0.382      0.338\n",
      "                   cat        600          5      0.529        0.6      0.533      0.265\n",
      "                 chair        600         12      0.236      0.333      0.175      0.108\n",
      "                   dog        600         25      0.529        0.8       0.64      0.406\n",
      "                 horse        600         37      0.432      0.739      0.691      0.373\n",
      "                 sheep        600          8      0.359      0.625      0.634      0.488\n",
      "                 train        600          2          0          0    0.00279    0.00111\n",
      "             billboard        600          3          1          0       0.07      0.056\n",
      "                rabbit        600          1      0.392          1      0.995      0.796\n",
      "                monkey        600         16      0.456      0.688      0.445      0.247\n",
      "                   pig        600          7      0.357      0.714      0.498      0.378\n",
      "                   toy        600         42      0.456      0.262      0.285      0.148\n",
      "         traffic light        600          5          1          0      0.124     0.0996\n",
      "          traffic sign        600          1          1          0      0.995      0.233\n",
      "Speed: 0.1ms pre-process, 2.5ms inference, 1.4ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp332\u001b[0m\n",
      "openimages\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_VOC.yaml, weights=['runs/train/replay_k_v_2oldmodels_openimages/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 b2fc1a21 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 image\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.673      0.602      0.643      0.373\n",
      "                   car       4952       1201      0.759      0.823      0.819      0.565\n",
      "                person       4952       4528      0.701       0.67       0.71      0.393\n",
      "             aeroplane       4952        285      0.804      0.445      0.649      0.347\n",
      "               bicycle       4952        337      0.764      0.709      0.785      0.487\n",
      "                  bird       4952        459      0.663      0.569      0.598      0.315\n",
      "                  boat       4952        263       0.42      0.513      0.437      0.209\n",
      "                bottle       4952        469       0.59      0.484      0.504      0.273\n",
      "                   bus       4952        213      0.758      0.671      0.744      0.535\n",
      "                   cat       4952        358      0.746      0.599      0.682      0.377\n",
      "                 chair       4952        756       0.58      0.423      0.471      0.257\n",
      "                   cow       4952        244      0.639      0.697      0.702      0.434\n",
      "           diningtable       4952        206      0.589      0.466      0.539      0.289\n",
      "                   dog       4952        489      0.715      0.599      0.672      0.364\n",
      "                 horse       4952        348      0.804      0.744      0.804      0.485\n",
      "             motorbike       4952        325      0.778      0.668      0.751      0.431\n",
      "           pottedplant       4952        480      0.515      0.419      0.394      0.172\n",
      "                 sheep       4952        242      0.579      0.657      0.659      0.416\n",
      "                  sofa       4952        239      0.607      0.603      0.583      0.346\n",
      "                 train       4952        282      0.766      0.702      0.743      0.404\n",
      "             tvmonitor       4952        308      0.678      0.575      0.623      0.364\n",
      "Speed: 0.1ms pre-process, 1.4ms inference, 0.7ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp333\u001b[0m\n",
      "Voc\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_kitti.yaml, weights=['runs/train/replay_k_v_2oldmodels_openimages/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 b2fc1a21 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.837      0.722      0.794      0.493\n",
      "                   car       2244       8711      0.862      0.881      0.932      0.677\n",
      "                   van       2244        861      0.845      0.739      0.843      0.583\n",
      "                 truck       2244        333      0.904      0.883      0.929       0.65\n",
      "                  tram       2244        138       0.86       0.92      0.946      0.618\n",
      "                person       2244       1286      0.832      0.633      0.713      0.362\n",
      "        person_sitting       2244         89      0.724      0.443       0.52      0.235\n",
      "               cyclist       2244        496      0.854      0.671      0.756      0.391\n",
      "                  misc       2244        284      0.818      0.602      0.714      0.429\n",
      "Speed: 0.0ms pre-process, 1.0ms inference, 0.7ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp334\u001b[0m\n",
      "kitti\n"
     ]
    }
   ],
   "source": [
    "# 1e-4 1e-3\n",
    "model = f'runs/train/replay_k_v_2oldmodels_openimages/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/openimages.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'openimages' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "\n",
    "# Voc\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/val_VOC.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Voc' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "# kitti\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/val_kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'kitti' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cc778bd2-b70d-4c92-8923-dacc279c6f2e",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
