{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "62cdf612-b534-4526-bb76-fd8b5ed77fda",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_Lwf: \u001b[0mweights=./runs/train/fog_02/weights/best.pt, cfg=models/yolov5s_VOCKITTI.yaml, data=data/VOCKITTIBiC.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=10, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=False, Lwf_lambda=0.0001, Lwf_temperature=1.0\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2882 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 3868f729 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/77cd99ee3c224754a48ca2883a9461b6\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83613  models.yolo.Detect                      [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 217 layers, 7089757 parameters, 7089757 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/355 items from runs/train/fog_02/weights/best.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 66 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/train2012.cache... 5717 ima\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 images\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m3.94 anchors/target, 1.000 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/exp81/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/exp81\u001b[0m\n",
      "Starting training for 10 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "        0/9      3.64G    0.07605    0.04515    0.07168         19        640: 1\n",
      "tensor([0.64540], device='cuda:0', grad_fn=<MulBackward0>)  \n",
      "                 Class     Images  Instances          P          R      mAP50   "
     ]
    }
   ],
   "source": [
    "# LwF (Learning without Forgetting) fine-tune: resume from the fog_02 best\n",
    "# checkpoint and train 10 epochs on the combined VOC+KITTI (BiC) dataset.\n",
    "# NOTE: the '{command}' on the '!' line is expanded by IPython's own brace\n",
    "# interpolation, not by f-string formatting, so the string needs no f-prefix\n",
    "# (the original f\"\"\"...\"\"\" contained no placeholders).\n",
    "command = \"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_Lwf.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_VOCKITTI.yaml \\\n",
    "--data data/VOCKITTIBiC.yaml \\\n",
    "--epochs 10 \\\n",
    "--weights ./runs/train/fog_02/weights/best.pt \\\n",
    "\"\"\"\n",
    "!{command}\n",
    "# Alternative starting checkpoint:\n",
    "# --weights ./runs/train/exp3/weights/best.pt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3c97ddc0-4226-40f2-bb3d-1da104c6c119",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bb4e7c3c-acba-4452-8e8b-b6599ead46d2",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "82774827-34ef-48ba-9abf-38400b385d98",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "5cad9343-4df4-4655-bf9d-bce900e65fc0",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_SI: \u001b[0mweights=./runs/train/exp4/weights/best.pt, cfg=models/yolov5s_kitti.yaml, data=data/kitti.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=60, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=100.0, SI_pt=./runs/train/exp4/weights/si.pt\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2882 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 613f2732 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/b29074b839fa4f7a982d32f8e7c806a6\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo.Detect                      [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_kitti summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "Transferred 348/349 items from runs/train/exp4/weights/best.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/train.cache... 4189 image\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/val.cache... 1048 images, 0\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.81 anchors/target, 0.999 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/exp17/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/exp17\u001b[0m\n",
      "Starting training for 60 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/59      3.59G    0.08578    0.09161    0.03705        128        640: 1\n",
      "tensor([1099.64001], device='cuda:0', grad_fn=<AddBackward0>) tensor([834.28168], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.165      0.165     0.0785     0.0364\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/59      3.59G    0.07611    0.06801    0.02829        133        640: 1\n",
      "tensor([34.90103], device='cuda:0', grad_fn=<AddBackward0>) tensor([373.92542], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.317      0.139      0.121     0.0555\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/59      3.59G    0.06636    0.05256    0.02381        131        640: 1\n",
      "tensor([4.71439], device='cuda:0', grad_fn=<AddBackward0>) tensor([84.50172], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.258      0.152      0.134     0.0668\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/59      3.59G    0.05977    0.04796    0.01946        108        640: 1\n",
      "tensor([18.40718], device='cuda:0', grad_fn=<AddBackward0>) tensor([11.28635], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.322      0.156      0.154     0.0796\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/59      3.59G    0.05716    0.04625    0.01758        156        640: 1\n",
      "tensor([12.23250], device='cuda:0', grad_fn=<AddBackward0>) tensor([8.11650], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.365      0.175      0.174     0.0907\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/59      3.59G    0.05559    0.04523    0.01642        123        640: 1\n",
      "tensor([9.52412], device='cuda:0', grad_fn=<AddBackward0>) tensor([6.78586], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.353      0.179      0.183     0.0932\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/59      3.59G    0.05547    0.04508    0.01647        174        640: 1\n",
      "tensor([6.82437], device='cuda:0', grad_fn=<AddBackward0>) tensor([6.53382], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.344      0.198      0.195      0.101\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/59      3.59G    0.05485    0.04405    0.01563        166        640: 1\n",
      "tensor([7.11771], device='cuda:0', grad_fn=<AddBackward0>) tensor([6.29437], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.381      0.192      0.196        0.1\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/59      3.59G    0.05463    0.04467    0.01525        152        640: 1\n",
      "tensor([7.47672], device='cuda:0', grad_fn=<AddBackward0>) tensor([6.01688], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.424      0.183      0.196      0.101\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/59      3.59G    0.05464    0.04455     0.0153        136        640: 1\n",
      "tensor([7.30496], device='cuda:0', grad_fn=<AddBackward0>) tensor([5.81267], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.361      0.195      0.191     0.0992\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/59      3.59G    0.05439     0.0446    0.01479        134        640: 1\n",
      "tensor([6.78337], device='cuda:0', grad_fn=<AddBackward0>) tensor([5.60726], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.318      0.193      0.194     0.0994\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/59      3.59G    0.05406    0.04401    0.01453        182        640: 1\n",
      "tensor([6.65159], device='cuda:0', grad_fn=<AddBackward0>) tensor([5.41686], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.409        0.2      0.201      0.106\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/59      3.59G    0.05399     0.0444    0.01431        128        640: 1\n",
      "tensor([6.45705], device='cuda:0', grad_fn=<AddBackward0>) tensor([5.21203], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.398      0.223      0.219      0.114\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/59      3.59G     0.0535    0.04345    0.01404        112        640: 1\n",
      "tensor([6.23622], device='cuda:0', grad_fn=<AddBackward0>) tensor([5.02837], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.418      0.212      0.209      0.108\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/59      3.59G    0.05349    0.04404    0.01392        151        640: 1\n",
      "tensor([6.33457], device='cuda:0', grad_fn=<AddBackward0>) tensor([4.83872], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.357      0.206       0.21      0.109\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/59      3.59G    0.05359    0.04424    0.01393        132        640: 1\n",
      "tensor([6.51040], device='cuda:0', grad_fn=<AddBackward0>) tensor([4.65149], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.39      0.222      0.218      0.113\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/59      3.59G     0.0532    0.04358    0.01368        131        640: 1\n",
      "tensor([5.48023], device='cuda:0', grad_fn=<AddBackward0>) tensor([4.48020], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.437      0.207      0.222      0.116\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/59      3.59G    0.05314     0.0436    0.01349        159        640: 1\n",
      "tensor([5.53518], device='cuda:0', grad_fn=<AddBackward0>) tensor([4.30421], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.373      0.217      0.209      0.109\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/59      3.59G    0.05299    0.04345     0.0132        125        640: 1\n",
      "tensor([5.45061], device='cuda:0', grad_fn=<AddBackward0>) tensor([4.11972], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.369      0.221      0.215      0.111\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/59      3.59G    0.05319    0.04408    0.01354         88        640: 1\n",
      "tensor([5.20792], device='cuda:0', grad_fn=<AddBackward0>) tensor([3.95090], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.375      0.235      0.222      0.114\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/59      3.59G    0.05292    0.04291    0.01322        137        640: 1\n",
      "tensor([5.44414], device='cuda:0', grad_fn=<AddBackward0>) tensor([3.78407], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.413      0.237      0.236      0.121\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/59      3.59G    0.05276    0.04393    0.01294        166        640: 1\n",
      "tensor([5.17697], device='cuda:0', grad_fn=<AddBackward0>) tensor([3.62349], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.437      0.256      0.248      0.128\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/59      3.59G     0.0525    0.04349    0.01291        161        640: 1\n",
      "tensor([4.54427], device='cuda:0', grad_fn=<AddBackward0>) tensor([3.46613], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.388      0.236      0.233      0.121\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/59      3.59G    0.05285    0.04308    0.01316        118        640: 1\n",
      "tensor([4.67042], device='cuda:0', grad_fn=<AddBackward0>) tensor([3.34094], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675        0.4      0.215      0.216      0.112\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/59      3.59G    0.05245     0.0433    0.01287        151        640: 1\n",
      "tensor([4.58838], device='cuda:0', grad_fn=<AddBackward0>) tensor([3.15611], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.419       0.22      0.228       0.12\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/59      3.59G    0.05252    0.04373     0.0129        133        640: 1\n",
      "tensor([4.49163], device='cuda:0', grad_fn=<AddBackward0>) tensor([3.01181], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.406      0.241      0.237      0.122\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/59      3.59G    0.05233    0.04314    0.01266        154        640: 1\n",
      "tensor([4.21951], device='cuda:0', grad_fn=<AddBackward0>) tensor([2.86820], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.359      0.229      0.221      0.116\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/59      3.59G    0.05261     0.0437    0.01273        122        640: 1\n",
      "tensor([5.04711], device='cuda:0', grad_fn=<AddBackward0>) tensor([2.74881], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.46      0.223      0.237      0.123\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/59      3.59G    0.05209    0.04331    0.01218        123        640: 1\n",
      "tensor([3.70072], device='cuda:0', grad_fn=<AddBackward0>) tensor([2.64582], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.389      0.252      0.238      0.126\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/59      3.59G    0.05198    0.04302    0.01231        127        640: 1\n",
      "tensor([3.85455], device='cuda:0', grad_fn=<AddBackward0>) tensor([2.45741], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.469       0.21      0.225      0.115\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/59      3.59G     0.0522    0.04188    0.01244        127        640: 1\n",
      "tensor([3.19420], device='cuda:0', grad_fn=<AddBackward0>) tensor([2.33511], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.432      0.227      0.228      0.116\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/59      3.59G    0.05193    0.04273    0.01215        122        640: 1\n",
      "tensor([3.65362], device='cuda:0', grad_fn=<AddBackward0>) tensor([2.21346], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.428      0.225      0.232       0.12\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/59      3.59G    0.05186    0.04321    0.01223        146        640: 1\n",
      "tensor([3.56909], device='cuda:0', grad_fn=<AddBackward0>) tensor([2.07593], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.46      0.249      0.242      0.127\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/59      3.59G    0.05169     0.0425    0.01208        202        640: 1\n",
      "tensor([3.13904], device='cuda:0', grad_fn=<AddBackward0>) tensor([1.96464], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.392      0.253      0.239      0.125\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/59      3.59G    0.05222    0.04265    0.01232         94        640: 1\n",
      "tensor([2.88163], device='cuda:0', grad_fn=<AddBackward0>) tensor([1.84791], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675        0.4      0.244      0.227      0.119\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/59      3.59G    0.05183    0.04276    0.01199        152        640: 1\n",
      "tensor([3.43715], device='cuda:0', grad_fn=<AddBackward0>) tensor([1.73378], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.357      0.218      0.207      0.108\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/59      3.59G    0.05191    0.04259    0.01232        123        640: 1\n",
      "tensor([2.78570], device='cuda:0', grad_fn=<AddBackward0>) tensor([1.63780], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.386      0.236      0.218      0.115\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/59      3.59G    0.05178    0.04267    0.01225        162        640: 1\n",
      "tensor([3.04847], device='cuda:0', grad_fn=<AddBackward0>) tensor([1.52635], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.417      0.251      0.243      0.127\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/59      3.59G    0.05158    0.04305    0.01198        161        640: 1\n",
      "tensor([2.84742], device='cuda:0', grad_fn=<AddBackward0>) tensor([1.42306], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.42      0.265      0.249      0.131\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/59      3.59G    0.05146    0.04308    0.01175        122        640: 1\n",
      "tensor([2.54767], device='cuda:0', grad_fn=<AddBackward0>) tensor([1.32946], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.381      0.259      0.235      0.124\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/59      3.59G    0.05191    0.04274    0.01209        126        640: 1\n",
      "tensor([2.47552], device='cuda:0', grad_fn=<AddBackward0>) tensor([1.23620], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.413      0.225      0.228      0.119\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/59      3.59G    0.05148    0.04285     0.0118         90        640: 1\n",
      "tensor([2.36242], device='cuda:0', grad_fn=<AddBackward0>) tensor([1.14712], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.438      0.251      0.237      0.123\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/59      3.59G    0.05149    0.04267     0.0118        118        640: 1\n",
      "tensor([2.71168], device='cuda:0', grad_fn=<AddBackward0>) tensor([1.06612], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.464      0.237       0.24      0.126\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/59      3.59G    0.05164    0.04268    0.01158        168        640:  ^C\n",
      "      43/59      3.59G    0.05164    0.04268    0.01158        168        640:  \n"
     ]
    }
   ],
   "source": [
    "# Fine-tune YOLOv5s on KITTI with Synaptic Intelligence (SI) regularisation,\n",
    "# warm-starting from the previous run's best checkpoint (exp4).\n",
    "# NOTE(review): --SI_enable is given the value 1e2 here, but the training log\n",
    "# above reports SI_enable as a boolean and SI_lambda=10.0 as a separate\n",
    "# option -- confirm whether 1e2 was actually meant for --SI_lambda.\n",
    "command = \"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_SI.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_kitti.yaml \\\n",
    "--data data/kitti.yaml \\\n",
    "--epochs 60 \\\n",
    "--weights ./runs/train/exp4/weights/best.pt \\\n",
    "--SI_enable 1e2 \\\n",
    "--SI_pt ./runs/train/exp4/weights/si.pt \\\n",
    "\"\"\"\n",
    "!{command}\n",
    "# Earlier variant of this run: --weights ./runs/train/exp3/weights/best.pt, value 0.1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "40570ca0-69de-4b1e-9c83-735fdf2882d0",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6947f7c4-5ff1-4fd5-9beb-374b1a7f8250",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2da11cfe-3d12-465c-85f4-64075959699a",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e7515d2b-40a1-4bdb-be90-ad29cc15305e",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b254fc44-6d04-43c2-bdcd-a05167b41f51",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2466c782-b985-4e68-a2e2-363c9df6fd0a",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4aac0462-6a45-4a24-8c16-b254670d0a5b",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "356df5e5-bf3d-42f4-9eb4-ebfdecf0a327",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_LwfPro: \u001b[0mweights=./runs/train/increment_VOC_plain/weights/last.pt, cfg=models/yolov5s_openimages.yaml, data=data/openimages.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=80, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=k_v_2oldmodels_openimages, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=True, Lwf_lambda=[0.0001, 0.001], Lwf_temperature=1.0, Old_models=['./runs/train/increment_VOC_plain/weights/last.pt', './runs/train/fog_02/weights/last.pt']\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2895 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/b7670968741a4ad0b909fdbff4a161df\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1    110583  models.yolo.Detect                      [36, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7116727 parameters, 7116727 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/355 items from runs/train/increment_VOC_plain/weights/last.pt\n",
      "Overriding model.yaml nc=36 with nc=26\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83613  models.yolo.Detect                      [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7089757 parameters, 7089757 gradients, 16.2 GFLOPs\n",
      "\n",
      "Overriding model.yaml nc=36 with nc=8\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35067  models.yolo.Detect                      [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7041211 parameters, 7041211 gradients, 16.0 GFLOPs\n",
      "\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 66 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/train.cache... 4200 \u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/val.cache... 1200 imag\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.02 anchors/target, 0.998 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/k_v_2oldmodels_openimages4/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/k_v_2oldmodels_openimages4\u001b[0m\n",
      "Starting training for 80 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/79      3.68G    0.08701    0.04282    0.06473         40        640: 1\n",
      "tensor([19.02633], device='cuda:0', grad_fn=<AddBackward0>) tensor(26779.69727, device='cuda:0', grad_fn=<AddBackward0>), tensor(15142.27148, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.861      0.039     0.0517     0.0227\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/79      5.79G    0.06631    0.04094    0.04659         63        640: 1\n",
      "tensor([17.18425], device='cuda:0', grad_fn=<AddBackward0>) tensor(23726.38672, device='cuda:0', grad_fn=<AddBackward0>), tensor(13424.88672, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.77      0.124      0.126     0.0639\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/79      5.79G     0.0623       0.04    0.04298         57        640: 1\n",
      "tensor([16.80366], device='cuda:0', grad_fn=<AddBackward0>) tensor(24395.15234, device='cuda:0', grad_fn=<AddBackward0>), tensor(13277.68945, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.738      0.149       0.17     0.0873\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/79      5.79G    0.06074    0.03988    0.04066         42        640: 1\n",
      "tensor([15.29276], device='cuda:0', grad_fn=<AddBackward0>) tensor(23772.01758, device='cuda:0', grad_fn=<AddBackward0>), tensor(11842.97461, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.61      0.174      0.171     0.0866\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/79      5.79G    0.05918    0.03864    0.03937         36        640: 1\n",
      "tensor([13.81472], device='cuda:0', grad_fn=<AddBackward0>) tensor(23168.61719, device='cuda:0', grad_fn=<AddBackward0>), tensor(10344.22168, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.589      0.195      0.185     0.0913\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/79      5.79G    0.05828    0.03963    0.03838         39        640: 1\n",
      "tensor([14.31044], device='cuda:0', grad_fn=<AddBackward0>) tensor(22099.93359, device='cuda:0', grad_fn=<AddBackward0>), tensor(11089.67285, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.562      0.192      0.176     0.0908\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/79      5.79G    0.05796    0.03912    0.03778         68        640: 1\n",
      "tensor([13.48946], device='cuda:0', grad_fn=<AddBackward0>) tensor(22753.17969, device='cuda:0', grad_fn=<AddBackward0>), tensor(9739.24414, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.618      0.213        0.2      0.106\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/79      5.79G     0.0579    0.03895    0.03778         31        640: 1\n",
      "tensor([12.21820], device='cuda:0', grad_fn=<AddBackward0>) tensor(24262.86523, device='cuda:0', grad_fn=<AddBackward0>), tensor(8864.03906, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.599      0.236       0.21      0.107\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/79      5.79G    0.05707    0.03897    0.03718         35        640: 1\n",
      "tensor([13.93920], device='cuda:0', grad_fn=<AddBackward0>) tensor(25127.59180, device='cuda:0', grad_fn=<AddBackward0>), tensor(10441.21191, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.549      0.212      0.196     0.0952\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/79      5.79G    0.05681    0.03897    0.03648         42        640: 1\n",
      "tensor([12.87174], device='cuda:0', grad_fn=<AddBackward0>) tensor(23554.14258, device='cuda:0', grad_fn=<AddBackward0>), tensor(9394.53906, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.512      0.227      0.212      0.105\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/79      5.79G    0.05626    0.03847    0.03621         38        640: 1\n",
      "tensor([13.13673], device='cuda:0', grad_fn=<AddBackward0>) tensor(23600.51367, device='cuda:0', grad_fn=<AddBackward0>), tensor(9827.02441, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.451      0.241      0.217      0.106\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/79      5.79G    0.05604    0.03899    0.03543         59        640: 1\n",
      "tensor([13.56419], device='cuda:0', grad_fn=<AddBackward0>) tensor(23786.58984, device='cuda:0', grad_fn=<AddBackward0>), tensor(9972.94238, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.608      0.234      0.212      0.104\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/79      5.79G    0.05548    0.03869    0.03633         46        640: 1\n",
      "tensor([13.34889], device='cuda:0', grad_fn=<AddBackward0>) tensor(23900.41992, device='cuda:0', grad_fn=<AddBackward0>), tensor(9850.14648, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.534      0.225      0.209      0.105\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/79      5.79G    0.05522     0.0383    0.03504         47        640: 1\n",
      "tensor([13.25201], device='cuda:0', grad_fn=<AddBackward0>) tensor(24157.52148, device='cuda:0', grad_fn=<AddBackward0>), tensor(9759.27051, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.612       0.23      0.211      0.103\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/79      5.79G    0.05485    0.03784      0.035         32        640: 1\n",
      "tensor([11.84126], device='cuda:0', grad_fn=<AddBackward0>) tensor(24355.56055, device='cuda:0', grad_fn=<AddBackward0>), tensor(8575.83887, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.612      0.237       0.21      0.106\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/79      5.79G    0.05417    0.03861    0.03418         48        640: 1\n",
      "tensor([12.06536], device='cuda:0', grad_fn=<AddBackward0>) tensor(22839.81641, device='cuda:0', grad_fn=<AddBackward0>), tensor(8740.08203, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.475      0.242      0.211      0.104\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/79      5.79G    0.05467    0.03792    0.03449         43        640: 1\n",
      "tensor([10.93924], device='cuda:0', grad_fn=<AddBackward0>) tensor(21442.15625, device='cuda:0', grad_fn=<AddBackward0>), tensor(7719.98193, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.437      0.222      0.196     0.0975\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/79      5.79G    0.05453    0.03853    0.03403         64        640: 1\n",
      "tensor([12.18847], device='cuda:0', grad_fn=<AddBackward0>) tensor(23044.41016, device='cuda:0', grad_fn=<AddBackward0>), tensor(8874.56250, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.556      0.236      0.225      0.112\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/79      5.79G    0.05387    0.03845    0.03422         61        640: 1\n",
      "tensor([11.72172], device='cuda:0', grad_fn=<AddBackward0>) tensor(22543.40625, device='cuda:0', grad_fn=<AddBackward0>), tensor(8338.49023, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.591      0.238      0.231      0.121\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/79      5.79G    0.05369    0.03865    0.03373         29        640: 1\n",
      "tensor([11.49901], device='cuda:0', grad_fn=<AddBackward0>) tensor(23008.19141, device='cuda:0', grad_fn=<AddBackward0>), tensor(8160.09229, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.557      0.221      0.227       0.11\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/79      5.79G     0.0533    0.03729    0.03306         39        640: 1\n",
      "tensor([11.10768], device='cuda:0', grad_fn=<AddBackward0>) tensor(22423.27539, device='cuda:0', grad_fn=<AddBackward0>), tensor(7776.83350, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.597      0.254      0.239      0.121\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/79      5.79G    0.05289    0.03783    0.03354         40        640: 1\n",
      "tensor([11.86408], device='cuda:0', grad_fn=<AddBackward0>) tensor(26267.08203, device='cuda:0', grad_fn=<AddBackward0>), tensor(8271.52930, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.492      0.245       0.22      0.112\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/79      5.79G    0.05286    0.03773    0.03376         44        640: 1\n",
      "tensor([11.32385], device='cuda:0', grad_fn=<AddBackward0>) tensor(21985.75586, device='cuda:0', grad_fn=<AddBackward0>), tensor(8075.11426, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.576      0.239      0.237      0.128\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/79      5.79G    0.05246    0.03799     0.0328         31        640: 1\n",
      "tensor([12.18908], device='cuda:0', grad_fn=<AddBackward0>) tensor(23794.32812, device='cuda:0', grad_fn=<AddBackward0>), tensor(8932.05762, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.472      0.249      0.225      0.115\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/79      5.79G    0.05214    0.03751    0.03271         72        640: 1\n",
      "tensor([11.86705], device='cuda:0', grad_fn=<AddBackward0>) tensor(24827.07031, device='cuda:0', grad_fn=<AddBackward0>), tensor(8282.93164, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.466      0.264       0.24      0.122\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/79      5.79G    0.05212    0.03792    0.03256         41        640: 1\n",
      "tensor([11.54488], device='cuda:0', grad_fn=<AddBackward0>) tensor(26947.21094, device='cuda:0', grad_fn=<AddBackward0>), tensor(7953.23730, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.478      0.237      0.236      0.127\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/79      5.79G    0.05237    0.03806    0.03261         33        640: 1\n",
      "tensor([11.57454], device='cuda:0', grad_fn=<AddBackward0>) tensor(23724.15430, device='cuda:0', grad_fn=<AddBackward0>), tensor(8196.18066, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.481      0.227      0.222      0.117\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/79      5.79G    0.05234    0.03738    0.03174         30        640: 1\n",
      "tensor([11.29442], device='cuda:0', grad_fn=<AddBackward0>) tensor(24409.80273, device='cuda:0', grad_fn=<AddBackward0>), tensor(8077.04736, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.454      0.242      0.235      0.124\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/79      5.79G    0.05222    0.03719    0.03182         30        640: 1\n",
      "tensor([10.78041], device='cuda:0', grad_fn=<AddBackward0>) tensor(23345.46094, device='cuda:0', grad_fn=<AddBackward0>), tensor(7656.49365, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.449       0.27      0.257      0.129\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/79      5.79G    0.05163    0.03718    0.03165         43        640: 1\n",
      "tensor([11.20528], device='cuda:0', grad_fn=<AddBackward0>) tensor(24268.11133, device='cuda:0', grad_fn=<AddBackward0>), tensor(7751.82910, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "      31/79      5.79G    0.05197    0.03698    0.03134         40        640: 1\n",
      "tensor([11.73576], device='cuda:0', grad_fn=<AddBackward0>) tensor(24368.69531, device='cuda:0', grad_fn=<AddBackward0>), tensor(8376.90234, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.42      0.274      0.233      0.118\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/79      5.79G    0.05171    0.03703    0.03108         37        640: 1\n",
      "tensor([11.72977], device='cuda:0', grad_fn=<AddBackward0>) tensor(24888.20117, device='cuda:0', grad_fn=<AddBackward0>), tensor(8327.29980, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.48      0.274      0.254      0.136\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/79      5.79G    0.05074    0.03659    0.03157         45        640: 1\n",
      "tensor([10.84999], device='cuda:0', grad_fn=<AddBackward0>) tensor(22503.19922, device='cuda:0', grad_fn=<AddBackward0>), tensor(7506.49414, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.457      0.265      0.246      0.128\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/79      5.79G    0.05157    0.03751    0.03097         70        640: 1\n",
      "tensor([11.25888], device='cuda:0', grad_fn=<AddBackward0>) tensor(22937.51562, device='cuda:0', grad_fn=<AddBackward0>), tensor(7875.35889, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.492      0.275      0.255      0.138\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/79      5.79G    0.05118    0.03679    0.03082         34        640: 1\n",
      "tensor([10.38402], device='cuda:0', grad_fn=<AddBackward0>) tensor(23655.02539, device='cuda:0', grad_fn=<AddBackward0>), tensor(7135.30908, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.517      0.263       0.25      0.131\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/79      5.79G    0.05043    0.03639    0.03068         52        640: 1\n",
      "tensor([10.49340], device='cuda:0', grad_fn=<AddBackward0>) tensor(23256.44531, device='cuda:0', grad_fn=<AddBackward0>), tensor(7197.15674, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.497      0.266      0.249      0.133\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/79      5.79G    0.05072    0.03664    0.03055         46        640: 1\n",
      "tensor([10.01598], device='cuda:0', grad_fn=<AddBackward0>) tensor(23996.09375, device='cuda:0', grad_fn=<AddBackward0>), tensor(6722.45410, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.503      0.267      0.263      0.136\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/79      5.79G    0.05036    0.03625    0.03047         42        640: 1\n",
      "tensor([10.69509], device='cuda:0', grad_fn=<AddBackward0>) tensor(23738.15430, device='cuda:0', grad_fn=<AddBackward0>), tensor(7275.61768, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.525       0.23      0.254      0.133\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/79      5.79G    0.05085     0.0365    0.03068         56        640: 1\n",
      "tensor([9.45546], device='cuda:0', grad_fn=<AddBackward0>) tensor(21175.84961, device='cuda:0', grad_fn=<AddBackward0>), tensor(6329.77588, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.478      0.254      0.242      0.132\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/79      5.79G    0.05059    0.03654    0.03046         92        640:  "
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "IOPub message rate exceeded.\n",
      "The Jupyter server will temporarily stop sending output\n",
      "to the client in order to avoid crashing it.\n",
      "To change this limit, set the config variable\n",
      "`--ServerApp.iopub_msg_rate_limit`.\n",
      "\n",
      "Current values:\n",
      "ServerApp.iopub_msg_rate_limit=1000.0 (msgs/sec)\n",
      "ServerApp.rate_limit_window=3.0 (secs)\n",
      "\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.482      0.274      0.246      0.134\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/79      5.79G    0.05011    0.03693    0.02989         39        640: 1\n",
      "tensor([10.70370], device='cuda:0', grad_fn=<AddBackward0>) tensor(23211.20508, device='cuda:0', grad_fn=<AddBackward0>), tensor(7440.27148, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.499      0.269      0.243      0.134\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/79      5.79G     0.0496    0.03658    0.02989         45        640: 1\n",
      "tensor([10.28663], device='cuda:0', grad_fn=<AddBackward0>) tensor(22805.98438, device='cuda:0', grad_fn=<AddBackward0>), tensor(7059.64893, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.51      0.258      0.249       0.13\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/79      5.79G    0.04984    0.03691    0.03047         50        640: 1\n",
      "tensor([10.47047], device='cuda:0', grad_fn=<AddBackward0>) tensor(23973.60938, device='cuda:0', grad_fn=<AddBackward0>), tensor(7062.67139, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.572      0.273      0.269       0.14\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/79      5.79G    0.04999    0.03628    0.02939         28        640: 1\n",
      "tensor([10.68186], device='cuda:0', grad_fn=<AddBackward0>) tensor(24980.20898, device='cuda:0', grad_fn=<AddBackward0>), tensor(7333.90820, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.523      0.279       0.26       0.14\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/79      5.79G    0.04921    0.03698    0.02995         36        640: 1\n",
      "tensor([10.02777], device='cuda:0', grad_fn=<AddBackward0>) tensor(23091.27344, device='cuda:0', grad_fn=<AddBackward0>), tensor(6885.75635, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.507      0.275      0.253      0.137\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/79      5.79G    0.04942    0.03635    0.02987         66        640: 1\n",
      "tensor([10.18965], device='cuda:0', grad_fn=<AddBackward0>) tensor(22693.46484, device='cuda:0', grad_fn=<AddBackward0>), tensor(6888.74707, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.481       0.25      0.243      0.133\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      50/79      5.79G    0.04923    0.03651     0.0299         54        640: 1\n",
      "tensor([9.42644], device='cuda:0', grad_fn=<AddBackward0>) tensor(21635.98242, device='cuda:0', grad_fn=<AddBackward0>), tensor(6377.96094, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.509      0.261      0.256      0.136\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      51/79      5.79G    0.04939    0.03661    0.02915         41        640: 1\n",
      "tensor([9.68099], device='cuda:0', grad_fn=<AddBackward0>) tensor(21104.15820, device='cuda:0', grad_fn=<AddBackward0>), tensor(6685.20752, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.544      0.235      0.263      0.138\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      52/79      5.79G    0.04897    0.03609    0.02917         45        640: 1\n",
      "tensor([9.88763], device='cuda:0', grad_fn=<AddBackward0>) tensor(23610.07031, device='cuda:0', grad_fn=<AddBackward0>), tensor(6531., device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.514      0.248      0.256      0.139\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      53/79      5.79G    0.04885    0.03626    0.02914         67        640: 1\n",
      "tensor([9.78537], device='cuda:0', grad_fn=<AddBackward0>) tensor(22729.76172, device='cuda:0', grad_fn=<AddBackward0>), tensor(6491.92383, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.546      0.276       0.27      0.146\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      54/79      5.79G    0.04889    0.03644    0.02962         61        640: 1\n",
      "tensor([10.18190], device='cuda:0', grad_fn=<AddBackward0>) tensor(23659.88672, device='cuda:0', grad_fn=<AddBackward0>), tensor(6857.30908, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.571      0.268       0.26      0.139\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      55/79      5.79G    0.04864     0.0354    0.02904         55        640: 1\n",
      "tensor([9.58352], device='cuda:0', grad_fn=<AddBackward0>) tensor(24503.31641, device='cuda:0', grad_fn=<AddBackward0>), tensor(6114.30957, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.548      0.249      0.272      0.148\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      56/79      5.79G    0.04877    0.03581     0.0288         34        640: 1\n",
      "tensor([9.45226], device='cuda:0', grad_fn=<AddBackward0>) tensor(22713.71094, device='cuda:0', grad_fn=<AddBackward0>), tensor(6341.93359, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.576      0.251      0.262      0.142\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      57/79      5.79G    0.04868    0.03611    0.02855         36        640: 1\n",
      "tensor([11.05652], device='cuda:0', grad_fn=<AddBackward0>) tensor(25233.06445, device='cuda:0', grad_fn=<AddBackward0>), tensor(7577.82812, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.491      0.286      0.262      0.141\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      58/79      5.79G    0.04876    0.03632    0.02884         52        640: 1\n",
      "tensor([9.97059], device='cuda:0', grad_fn=<AddBackward0>) tensor(23834.24805, device='cuda:0', grad_fn=<AddBackward0>), tensor(6580.76953, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.604      0.225      0.263      0.145\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      59/79      5.79G    0.04839    0.03543    0.02869         31        640: 1\n",
      "tensor([10.44586], device='cuda:0', grad_fn=<AddBackward0>) tensor(25253.24219, device='cuda:0', grad_fn=<AddBackward0>), tensor(7205.29541, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.534      0.261      0.275       0.15\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      60/79      5.79G    0.04834    0.03587    0.02877         49        640: 1\n",
      "tensor([8.70160], device='cuda:0', grad_fn=<AddBackward0>) tensor(21657.43945, device='cuda:0', grad_fn=<AddBackward0>), tensor(5636.76465, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.511      0.278      0.263      0.144\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      61/79      5.79G    0.04824    0.03546    0.02862         52        640: 1\n",
      "tensor([8.99891], device='cuda:0', grad_fn=<AddBackward0>) tensor(21956.27930, device='cuda:0', grad_fn=<AddBackward0>), tensor(5832.50537, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.523      0.255      0.252      0.134\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      62/79      5.79G    0.04874    0.03597     0.0288         41        640: 1\n",
      "tensor([8.81815], device='cuda:0', grad_fn=<AddBackward0>) tensor(22474.19336, device='cuda:0', grad_fn=<AddBackward0>), tensor(5532.86816, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.572      0.245      0.274      0.144\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      63/79      5.79G    0.04815    0.03553    0.02882         34        640: 1\n",
      "tensor([9.61309], device='cuda:0', grad_fn=<AddBackward0>) tensor(22693.13672, device='cuda:0', grad_fn=<AddBackward0>), tensor(6574.60596, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.487      0.276      0.259      0.138\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      64/79      5.79G    0.04783    0.03513    0.02917         37        640: 1\n",
      "tensor([9.01167], device='cuda:0', grad_fn=<AddBackward0>) tensor(22658.03125, device='cuda:0', grad_fn=<AddBackward0>), tensor(5926.28418, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.523      0.265       0.27      0.145\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      65/79      5.79G     0.0482    0.03548    0.02832         33        640: 1\n",
      "tensor([9.10153], device='cuda:0', grad_fn=<AddBackward0>) tensor(22888.61719, device='cuda:0', grad_fn=<AddBackward0>), tensor(5847.51025, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.475      0.287      0.257       0.14\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      66/79      5.79G    0.04834    0.03562    0.02864         20        640: 1\n",
      "tensor([10.04367], device='cuda:0', grad_fn=<AddBackward0>) tensor(26141.79883, device='cuda:0', grad_fn=<AddBackward0>), tensor(6483.81104, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.544      0.236      0.263      0.145\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      67/79      5.79G    0.04769    0.03603    0.02866         49        640: 1\n",
      "tensor([9.46573], device='cuda:0', grad_fn=<AddBackward0>) tensor(23109.86133, device='cuda:0', grad_fn=<AddBackward0>), tensor(6174.09229, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.494      0.263      0.267      0.146\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      68/79      5.79G     0.0477    0.03565    0.02849         93        640: 1\n",
      "tensor([9.03424], device='cuda:0', grad_fn=<AddBackward0>) tensor(21557.66602, device='cuda:0', grad_fn=<AddBackward0>), tensor(5670.99951, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.494      0.258      0.256       0.14\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      69/79      5.79G    0.04775     0.0357    0.02834         32        640: 1\n",
      "tensor([9.12569], device='cuda:0', grad_fn=<AddBackward0>) tensor(21446.03320, device='cuda:0', grad_fn=<AddBackward0>), tensor(6144.21191, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.484      0.277      0.253      0.135\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      70/79      5.79G     0.0479    0.03552    0.02854         47        640: 1\n",
      "tensor([9.62761], device='cuda:0', grad_fn=<AddBackward0>) tensor(22225.58789, device='cuda:0', grad_fn=<AddBackward0>), tensor(6381.32715, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.517       0.26      0.269      0.143\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      71/79      5.79G    0.04764    0.03618    0.02789         43        640: 1\n",
      "tensor([9.30110], device='cuda:0', grad_fn=<AddBackward0>) tensor(22973.72852, device='cuda:0', grad_fn=<AddBackward0>), tensor(6163.33301, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.479      0.295       0.26       0.14\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      72/79      5.79G    0.04768    0.03505    0.02814         37        640: 1\n",
      "tensor([9.21756], device='cuda:0', grad_fn=<AddBackward0>) tensor(22996.36133, device='cuda:0', grad_fn=<AddBackward0>), tensor(6058.01807, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.534      0.232      0.251      0.136\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      73/79      5.79G     0.0475    0.03551    0.02791         53        640: 1\n",
      "tensor([8.94725], device='cuda:0', grad_fn=<AddBackward0>) tensor(23130.36133, device='cuda:0', grad_fn=<AddBackward0>), tensor(5690.78857, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.489      0.268      0.258      0.138\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      74/79      5.79G    0.04784    0.03603    0.02834         59        640: 1\n",
      "tensor([9.50507], device='cuda:0', grad_fn=<AddBackward0>) tensor(23316.81641, device='cuda:0', grad_fn=<AddBackward0>), tensor(6062.80664, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.476      0.281       0.26      0.142\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      75/79      5.79G    0.04707    0.03489    0.02822         26        640: 1\n",
      "tensor([9.60987], device='cuda:0', grad_fn=<AddBackward0>) tensor(23467.10352, device='cuda:0', grad_fn=<AddBackward0>), tensor(6551.68799, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.495      0.258      0.248      0.134\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      76/79      5.79G    0.04768    0.03547    0.02775         54        640: 1\n",
      "tensor([9.17165], device='cuda:0', grad_fn=<AddBackward0>) tensor(22482.99805, device='cuda:0', grad_fn=<AddBackward0>), tensor(5926.57568, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.494      0.257      0.261      0.142\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      77/79      5.79G    0.04706    0.03489    0.02798         31        640: 1\n",
      "tensor([9.66164], device='cuda:0', grad_fn=<AddBackward0>) tensor(22997.03125, device='cuda:0', grad_fn=<AddBackward0>), tensor(6400.40332, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.462      0.289      0.264      0.142\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      78/79      5.79G    0.04731    0.03546    0.02739         56        640: 1\n",
      "tensor([9.65964], device='cuda:0', grad_fn=<AddBackward0>) tensor(24141.52734, device='cuda:0', grad_fn=<AddBackward0>), tensor(6225.91357, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.472      0.314      0.262      0.141\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      79/79      5.79G    0.04715    0.03507     0.0278         45        640: 1\n",
      "tensor([8.55128], device='cuda:0', grad_fn=<AddBackward0>) tensor(22780.97266, device='cuda:0', grad_fn=<AddBackward0>), tensor(5520.38184, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.482      0.302      0.264      0.142\n",
      "\n",
      "80 epochs completed in 1.421 hours.\n",
      "Optimizer stripped from runs/train/k_v_2oldmodels_openimages4/weights/last.pt, 14.6MB\n",
      "Optimizer stripped from runs/train/k_v_2oldmodels_openimages4/weights/best.pt, 14.6MB\n",
      "\n",
      "Validating runs/train/k_v_2oldmodels_openimages4/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.532      0.262      0.275      0.151\n",
      "                   car       1200        287      0.627      0.516      0.472       0.28\n",
      "                   van       1200         29       0.31      0.103      0.185      0.136\n",
      "                 truck       1200         29      0.237      0.345      0.188      0.113\n",
      "                person       1200       2264      0.406      0.299      0.274      0.123\n",
      "               bicycle       1200         54      0.371      0.333      0.347      0.167\n",
      "                  bird       1200        136      0.521       0.44      0.445      0.207\n",
      "                  boat       1200        145      0.425      0.372      0.319      0.126\n",
      "                bottle       1200         31          1          0   0.000303   0.000138\n",
      "                   bus       1200         15      0.492        0.6      0.631      0.439\n",
      "                   cat       1200          1          1          0     0.0249    0.00995\n",
      "                 chair       1200         21     0.0423     0.0476     0.0259     0.0104\n",
      "                   dog       1200         42      0.771      0.333      0.477       0.23\n",
      "                 horse       1200         44      0.568      0.386      0.504      0.253\n",
      "                 sheep       1200         10      0.424        0.3      0.401      0.234\n",
      "             billboard       1200          4          1          0    0.00305   0.000611\n",
      "                rabbit       1200         11          0          0      0.225      0.114\n",
      "                monkey       1200         18      0.425      0.889      0.652      0.385\n",
      "                   pig       1200          6      0.472        0.5      0.508      0.289\n",
      "                   toy       1200         64     0.0876     0.0312     0.0222     0.0084\n",
      "         traffic light       1200         18          1          0          0          0\n",
      "          traffic sign       1200          4          1          0     0.0825     0.0355\n",
      "Results saved to \u001b[1mruns/train/k_v_2oldmodels_openimages4\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : k_v_2oldmodels_openimages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/b7670968741a4ad0b909fdbff4a161df\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_f1                    : 0.3510390002946161\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_false_positives       : 31.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5                : 0.3465468055712667\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5:.95            : 0.1668183782862163\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_precision             : 0.37073112583316664\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_recall                : 0.3333333333333333\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_support               : 54\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_true_positives        : 18.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_f1                  : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_false_positives     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_mAP@.5              : 0.003053129548762736\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_mAP@.5:.95          : 0.0006106259097525472\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_precision           : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_recall              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_support             : 4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_true_positives      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_f1                       : 0.4770255824263296\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_false_positives          : 55.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_mAP@.5                   : 0.4446950457022434\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_mAP@.5:.95               : 0.206747291445961\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_precision                : 0.5210063228751253\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_recall                   : 0.4398920979803333\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_support                  : 136\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_true_positives           : 60.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_f1                       : 0.3968462950376369\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_false_positives          : 73.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_mAP@.5                   : 0.31938588199213114\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_mAP@.5:.95               : 0.12583461407196597\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_precision                : 0.42470971217280273\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_recall                   : 0.3724137931034483\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_support                  : 145\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_true_positives           : 54.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_f1                     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_false_positives        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_mAP@.5                 : 0.0003030196826985275\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_mAP@.5:.95             : 0.00013813064799653917\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_precision              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_recall                 : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_support                : 31\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_true_positives         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_f1                        : 0.5408495391153212\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_false_positives           : 9.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5                    : 0.6306974920312658\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5:.95                : 0.43932768861310467\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_precision                 : 0.4923150975782554\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_recall                    : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_support                   : 15\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_true_positives            : 9.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_f1                        : 0.5658391293108274\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_false_positives           : 88.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5                    : 0.4715118740763625\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5:.95                : 0.2801443512037828\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_precision                 : 0.6268082142071987\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_recall                    : 0.5156794425087108\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_support                   : 287\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_true_positives            : 148.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_f1                        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_false_positives           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_mAP@.5                    : 0.024875\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_mAP@.5:.95                : 0.00995\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_precision                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_recall                    : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_support                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_true_positives            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_f1                      : 0.04477576253450943\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_false_positives         : 23.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_mAP@.5                  : 0.025864105941397937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_mAP@.5:.95              : 0.01035393736997953\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_precision               : 0.042252885549987\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_recall                  : 0.047619047619047616\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_support                 : 21\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_true_positives          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_f1                        : 0.4653487968483037\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_false_positives           : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_mAP@.5                    : 0.47683617413622525\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_mAP@.5:.95                : 0.22997335943520986\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_precision                 : 0.7705042068846316\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_recall                    : 0.3333333333333333\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_support                   : 42\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_true_positives            : 14.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_f1                      : 0.4597937323914862\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_false_positives         : 13.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_mAP@.5                  : 0.5042284406902293\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_mAP@.5:.95              : 0.2532724119884193\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_precision               : 0.567684685487412\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_recall                  : 0.38636363636363635\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_support                 : 44\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_true_positives          : 17.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [2104]                   : (9.051091194152832, 41.369720458984375)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [160]         : (0.05165492097561299, 0.27533091677486327)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [160]    : (0.022718866138681872, 0.1504473385823227)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [160]       : (0.4200882851955502, 0.8608197653726315)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [160]          : (0.03896024242962176, 0.3138809876367435)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_f1                     : 0.5750919463186975\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_false_positives        : 22.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_mAP@.5                 : 0.6522092814306437\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_mAP@.5:.95             : 0.38534415544490247\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_precision              : 0.4250427067491135\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_recall                 : 0.8888888888888888\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_support                : 18\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_true_positives         : 16.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_f1                     : 0.3447084012510523\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_false_positives        : 992.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5                 : 0.27405489739619393\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5:.95             : 0.12313939533456182\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_precision              : 0.4060464629337668\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_recall                 : 0.299469964664311\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_support                : 2264\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_true_positives         : 678.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_f1                        : 0.48576309253253735\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_false_positives           : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_mAP@.5                    : 0.5076418533157664\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_mAP@.5:.95                : 0.2886416942182432\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_precision                 : 0.47231449695515404\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_recall                    : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_support                   : 6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_true_positives            : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_f1                     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_false_positives        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_mAP@.5                 : 0.2250996525785683\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_mAP@.5:.95             : 0.11420982569343341\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_precision              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_recall                 : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_support                : 11\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_true_positives         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_f1                      : 0.35129181782099655\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_false_positives         : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_mAP@.5                  : 0.4005018295463392\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_mAP@.5:.95              : 0.23403587492649985\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_precision               : 0.42373975967725963\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_recall                  : 0.3\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_support                 : 10\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_true_positives          : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_f1                        : 0.046071823468881876\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_false_positives           : 21.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_mAP@.5                    : 0.022206132565962336\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_mAP@.5:.95                : 0.008403409452079019\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_precision                 : 0.08763872732164829\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_recall                    : 0.03125\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_support                   : 64\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_true_positives            : 2.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_f1              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_false_positives : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_mAP@.5          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_mAP@.5:.95      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_precision       : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_recall          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_support         : 18\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_true_positives  : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_f1               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_false_positives  : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_mAP@.5           : 0.08247863247863248\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_mAP@.5:.95       : 0.035462315462315465\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_precision        : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_recall           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_support          : 4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_true_positives   : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [160]          : (0.04705801233649254, 0.08701267838478088)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [160]          : (0.02739197015762329, 0.0647267997264862)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [160]          : (0.0348886102437973, 0.04281514883041382)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_f1                      : 0.2812034417584501\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_false_positives         : 32.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5                  : 0.18834957141467126\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5:.95              : 0.11323252894537225\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_precision               : 0.23740064437738853\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_recall                  : 0.3448275862068966\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_support                 : 29\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_true_positives          : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [160]            : (0.0566844567656517, 0.07035847753286362)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [160]            : (0.0320461131632328, 0.04803663492202759)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [160]            : (0.025080455467104912, 0.02759549580514431)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_f1                        : 0.15511739478806502\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_false_positives           : 7.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5                    : 0.18456983472615135\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5:.95                : 0.13564250051675542\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_precision                 : 0.3099051432384764\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_recall                    : 0.10344827586206896\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_support                   : 29\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_true_positives            : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [160]                   : (0.00034750000000000026, 0.07011406844106464)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [160]                   : (0.00034750000000000026, 0.009740139416983522)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [160]                   : (0.00034750000000000026, 0.009740139416983522)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : k_v_2oldmodels_openimages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/yolov5/b7670968741a4ad0b909fdbff4a161df\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_enable          : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_lambda          : [0.0001, 0.001]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_temperature     : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Old_models          : ['./runs/train/increment_VOC_plain/weights/last.pt', './runs/train/fog_02/weights/last.pt']\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bbox_interval       : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cfg                 : models/yolov5s_openimages.yaml\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.225\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     data                : data/openimages.yaml\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     epochs              : 80\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                : k_v_2oldmodels_openimages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/k_v_2oldmodels_openimages4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weights             : ./runs/train/increment_VOC_plain/weights/last.pt\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (2.16 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     model graph                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n"
     ]
    }
   ],
   "source": [
    "# Launch LwF-Pro incremental training on OpenImages, distilling from two\n",
    "# frozen old models (VOC-increment and fog) with per-model Lwf_lambda weights.\n",
    "# Plain string (no f-prefix needed: nothing is interpolated here; the {command}\n",
    "# below is expanded by IPython's ! shell escape, not by Python).\n",
    "command = \"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_LwfPro.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_openimages.yaml \\\n",
    "--data data/openimages.yaml \\\n",
    "--epochs 80 \\\n",
    "--weights ./runs/train/increment_VOC_plain/weights/last.pt \\\n",
    "--Lwf_enable \\\n",
    "--Lwf_temperature 1.0 \\\n",
    "--Lwf_lambda \\\n",
    "        1e-4 \\\n",
    "        1e-3 \\\n",
    "--Old_models \\\n",
    "        ./runs/train/increment_VOC_plain/weights/last.pt \\\n",
    "        ./runs/train/fog_02/weights/last.pt \\\n",
    "--name k_v_2oldmodels_openimages\n",
    "\"\"\"\n",
    "!{command}\n",
    "# Runtime: ~43 minutes"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5fa519f0-74c6-4ccb-a64e-500c514e1954",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "76931a5d-8a59-46b4-87c3-f2e29ceb2655",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8fda1df1-48c3-40b2-859b-dd584da087dd",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "eefa662f-e32c-4961-b380-c59cf7bebe04",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "db2e3b21-45f6-4159-8e95-8f40d85aaa47",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6920b87e-c65d-4153-9704-3ac0bf1504bf",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "8f50682f-bef8-47b2-adce-f985d33ae668",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_LwfPro: \u001b[0mweights=./runs/train/increment_VOC_plain/weights/last.pt, cfg=models/yolov5s_openimages.yaml, data=data/openimages.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=80, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=k_v_2oldmodels_openimages, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=True, Lwf_lambda=[0.0001, 0.0005], Lwf_temperature=1.0, Old_models=['./runs/train/increment_VOC_plain/weights/last.pt', './runs/train/fog_02/weights/last.pt']\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2895 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/22e86818b233466c9158430e81a88fd3\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1    110583  models.yolo.Detect                      [36, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7116727 parameters, 7116727 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/355 items from runs/train/increment_VOC_plain/weights/last.pt\n",
      "Overriding model.yaml nc=36 with nc=26\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83613  models.yolo.Detect                      [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7089757 parameters, 7089757 gradients, 16.2 GFLOPs\n",
      "\n",
      "Overriding model.yaml nc=36 with nc=8\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35067  models.yolo.Detect                      [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7041211 parameters, 7041211 gradients, 16.0 GFLOPs\n",
      "\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 66 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/train.cache... 4200 \u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/val.cache... 1200 imag\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.02 anchors/target, 0.998 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/k_v_2oldmodels_openimages6/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/k_v_2oldmodels_openimages6\u001b[0m\n",
      "Starting training for 80 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/79      3.68G    0.08587    0.04254    0.06225         40        640: 1\n",
      "tensor([11.53669], device='cuda:0', grad_fn=<AddBackward0>) tensor(19523.33203, device='cuda:0', grad_fn=<AddBackward0>), tensor(16935.19531, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.855     0.0634      0.068     0.0329\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/79      5.79G    0.06369    0.03885    0.03996         63        640: 1\n",
      "tensor([10.40685], device='cuda:0', grad_fn=<AddBackward0>) tensor(15737.83008, device='cuda:0', grad_fn=<AddBackward0>), tensor(15082.92773, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.703      0.182      0.196      0.111\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/79      5.79G    0.05932    0.03774    0.03565         57        640: 1\n",
      "tensor([10.07585], device='cuda:0', grad_fn=<AddBackward0>) tensor(14262.96191, device='cuda:0', grad_fn=<AddBackward0>), tensor(15318.43066, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.662      0.229       0.23      0.133\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/79      5.79G    0.05743    0.03762    0.03322         42        640: 1\n",
      "tensor([9.30316], device='cuda:0', grad_fn=<AddBackward0>) tensor(13682.45703, device='cuda:0', grad_fn=<AddBackward0>), tensor(13965.04883, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.648      0.244      0.254       0.15\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/79      5.79G    0.05547    0.03645    0.03191         36        640: 1\n",
      "tensor([8.52993], device='cuda:0', grad_fn=<AddBackward0>) tensor(14276.24121, device='cuda:0', grad_fn=<AddBackward0>), tensor(12074.11816, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.606      0.257      0.262      0.151\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/79      5.79G    0.05467    0.03734    0.03073         39        640: 1\n",
      "tensor([8.94128], device='cuda:0', grad_fn=<AddBackward0>) tensor(16058.26953, device='cuda:0', grad_fn=<AddBackward0>), tensor(12781.99609, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.495      0.268      0.264      0.154\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/79      5.79G    0.05397    0.03684    0.02986         68        640: 1\n",
      "tensor([8.65862], device='cuda:0', grad_fn=<AddBackward0>) tensor(17145.33594, device='cuda:0', grad_fn=<AddBackward0>), tensor(11200.42090, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.56      0.278      0.278      0.161\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/79      5.79G     0.0538    0.03665    0.03004         31        640: 1\n",
      "tensor([7.79914], device='cuda:0', grad_fn=<AddBackward0>) tensor(17189.51758, device='cuda:0', grad_fn=<AddBackward0>), tensor(10540.08301, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.553      0.311      0.295      0.175\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/79      5.79G    0.05324    0.03657    0.02921         35        640: 1\n",
      "tensor([8.58282], device='cuda:0', grad_fn=<AddBackward0>) tensor(17124.71484, device='cuda:0', grad_fn=<AddBackward0>), tensor(12056.21484, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.51      0.344      0.282      0.164\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/79      5.79G    0.05276    0.03645     0.0285         42        640: 1\n",
      "tensor([8.22159], device='cuda:0', grad_fn=<AddBackward0>) tensor(17879.16406, device='cuda:0', grad_fn=<AddBackward0>), tensor(10878.94238, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.507      0.334      0.304      0.176\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/79      5.79G    0.05219    0.03604    0.02805         38        640: 1\n",
      "tensor([8.24858], device='cuda:0', grad_fn=<AddBackward0>) tensor(17524.85547, device='cuda:0', grad_fn=<AddBackward0>), tensor(11296.42285, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.482       0.34      0.286      0.162\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/79      5.79G     0.0517    0.03645    0.02733         59        640: 1\n",
      "tensor([8.65837], device='cuda:0', grad_fn=<AddBackward0>) tensor(18361.18945, device='cuda:0', grad_fn=<AddBackward0>), tensor(11509.88770, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.505      0.344      0.299      0.167\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/79      5.79G    0.05138    0.03613    0.02809         46        640: 1\n",
      "tensor([8.56621], device='cuda:0', grad_fn=<AddBackward0>) tensor(17981.73047, device='cuda:0', grad_fn=<AddBackward0>), tensor(11600.39062, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.513      0.345      0.296       0.17\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/79      5.79G    0.05126    0.03572    0.02701         47        640: 1\n",
      "tensor([8.47065], device='cuda:0', grad_fn=<AddBackward0>) tensor(18407.27539, device='cuda:0', grad_fn=<AddBackward0>), tensor(11300.21387, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.507      0.374      0.304      0.179\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/79      5.79G     0.0508    0.03522    0.02687         32        640: 1\n",
      "tensor([7.65466], device='cuda:0', grad_fn=<AddBackward0>) tensor(18979.72070, device='cuda:0', grad_fn=<AddBackward0>), tensor(10097.93262, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.535      0.331      0.297      0.168\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/79      5.79G    0.05017    0.03583    0.02599         48        640: 1\n",
      "tensor([7.81007], device='cuda:0', grad_fn=<AddBackward0>) tensor(17100.46094, device='cuda:0', grad_fn=<AddBackward0>), tensor(10313.85156, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.525      0.361      0.337        0.2\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/79      5.79G    0.05008    0.03513    0.02607         43        640: 1\n",
      "tensor([7.17121], device='cuda:0', grad_fn=<AddBackward0>) tensor(16598.27734, device='cuda:0', grad_fn=<AddBackward0>), tensor(9192.97559, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.509      0.323      0.304      0.175\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/79      5.79G     0.0501    0.03568    0.02573         64        640: 1\n",
      "tensor([7.81094], device='cuda:0', grad_fn=<AddBackward0>) tensor(17735.67188, device='cuda:0', grad_fn=<AddBackward0>), tensor(10279.22461, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233        0.5      0.361      0.296      0.172\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/79      5.79G    0.04956    0.03546    0.02578         61        640: 1\n",
      "tensor([7.60071], device='cuda:0', grad_fn=<AddBackward0>) tensor(17675.34570, device='cuda:0', grad_fn=<AddBackward0>), tensor(9699.81738, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.582      0.349      0.332      0.199\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/79      5.79G    0.04905    0.03569    0.02531         29        640: 1\n",
      "tensor([7.60327], device='cuda:0', grad_fn=<AddBackward0>) tensor(17807.23633, device='cuda:0', grad_fn=<AddBackward0>), tensor(9833.62402, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.535      0.343      0.304      0.174\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/79      5.79G    0.04874    0.03433    0.02459         39        640: 1\n",
      "tensor([7.29519], device='cuda:0', grad_fn=<AddBackward0>) tensor(16977.50977, device='cuda:0', grad_fn=<AddBackward0>), tensor(9320.93262, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.563      0.374      0.343      0.205\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/79      5.79G    0.04814    0.03468    0.02488         40        640: 1\n",
      "tensor([7.76688], device='cuda:0', grad_fn=<AddBackward0>) tensor(20945.83398, device='cuda:0', grad_fn=<AddBackward0>), tensor(9705.25488, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.556      0.309      0.319      0.187\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.55      0.339      0.331      0.198\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/79      5.79G    0.04739    0.03427      0.024         72        640: 1\n",
      "tensor([7.75004], device='cuda:0', grad_fn=<AddBackward0>) tensor(19084.12305, device='cuda:0', grad_fn=<AddBackward0>), tensor(9893.21484, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.567      0.374      0.341      0.199\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/79      5.79G    0.04721     0.0346    0.02403         41        640: 1\n",
      "tensor([7.74290], device='cuda:0', grad_fn=<AddBackward0>) tensor(21740.33594, device='cuda:0', grad_fn=<AddBackward0>), tensor(9540.95605, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.585      0.345       0.34      0.193\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/79      5.79G    0.04754    0.03451    0.02376         33        640: 1\n",
      "tensor([7.58538], device='cuda:0', grad_fn=<AddBackward0>) tensor(18510.30273, device='cuda:0', grad_fn=<AddBackward0>), tensor(9636.10547, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.593      0.332      0.344      0.205\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/79      5.79G    0.04709    0.03396    0.02311         30        640: 1\n",
      "tensor([7.42293], device='cuda:0', grad_fn=<AddBackward0>) tensor(18516.93359, device='cuda:0', grad_fn=<AddBackward0>), tensor(9832.72461, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.557       0.34      0.339      0.198\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/79      5.79G     0.0472    0.03379    0.02308         30        640: 1\n",
      "tensor([7.23637], device='cuda:0', grad_fn=<AddBackward0>) tensor(18939.75586, device='cuda:0', grad_fn=<AddBackward0>), tensor(9315.68457, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.658      0.346       0.36      0.214\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/79      5.79G    0.04649    0.03376    0.02299         43        640: 1\n",
      "tensor([7.39750], device='cuda:0', grad_fn=<AddBackward0>) tensor(18896.28516, device='cuda:0', grad_fn=<AddBackward0>), tensor(9321.33008, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.666      0.342      0.353      0.215\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/79      5.79G      0.047    0.03358    0.02282         62        640: 1\n",
      "tensor([7.42563], device='cuda:0', grad_fn=<AddBackward0>) tensor(18183.60547, device='cuda:0', grad_fn=<AddBackward0>), tensor(9220.79004, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.573      0.338      0.342      0.205\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/79      5.79G    0.04654    0.03346    0.02252         40        640: 1\n",
      "tensor([7.64810], device='cuda:0', grad_fn=<AddBackward0>) tensor(18958.19922, device='cuda:0', grad_fn=<AddBackward0>), tensor(9987.75781, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.618      0.339      0.334      0.198\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/79      5.79G    0.04624    0.03352    0.02229         37        640: 1\n",
      "tensor([7.65966], device='cuda:0', grad_fn=<AddBackward0>) tensor(19340.16016, device='cuda:0', grad_fn=<AddBackward0>), tensor(9893.93359, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.659      0.348      0.362      0.212\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/79      5.79G    0.04555      0.033    0.02277         45        640: 1\n",
      "tensor([7.29209], device='cuda:0', grad_fn=<AddBackward0>) tensor(17909.16016, device='cuda:0', grad_fn=<AddBackward0>), tensor(9003.10645, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.571      0.334      0.334      0.193\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/79      5.79G    0.04607    0.03379    0.02233         70        640: 1\n",
      "tensor([7.46949], device='cuda:0', grad_fn=<AddBackward0>) tensor(18099.44531, device='cuda:0', grad_fn=<AddBackward0>), tensor(9541.37793, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.608      0.351      0.358      0.214\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/79      5.79G    0.04566    0.03308    0.02213         34        640: 1\n",
      "tensor([6.86470], device='cuda:0', grad_fn=<AddBackward0>) tensor(18333.35742, device='cuda:0', grad_fn=<AddBackward0>), tensor(8524.60059, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.649      0.331      0.355      0.205\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/79      5.79G    0.04499     0.0327    0.02191         52        640: 1\n",
      "tensor([6.92444], device='cuda:0', grad_fn=<AddBackward0>) tensor(18482.26367, device='cuda:0', grad_fn=<AddBackward0>), tensor(8536.86426, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.577      0.354      0.347      0.212\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/79      5.79G    0.04507    0.03298    0.02186         46        640: 1\n",
      "tensor([6.79093], device='cuda:0', grad_fn=<AddBackward0>) tensor(18294.82422, device='cuda:0', grad_fn=<AddBackward0>), tensor(8381.00684, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.59      0.343      0.347      0.206\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/79      5.79G    0.04459     0.0324    0.02152         42        640: 1\n",
      "tensor([7.09565], device='cuda:0', grad_fn=<AddBackward0>) tensor(18919.10547, device='cuda:0', grad_fn=<AddBackward0>), tensor(8690.18945, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.568       0.33      0.333      0.198\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/79      5.79G      0.045    0.03251    0.02148         56        640: 1\n",
      "tensor([6.47251], device='cuda:0', grad_fn=<AddBackward0>) tensor(16488.82617, device='cuda:0', grad_fn=<AddBackward0>), tensor(7839.76465, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.567      0.333      0.348       0.21\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/79      5.79G    0.04495    0.03264    0.02154         47        640: 1\n",
      "tensor([7.70357], device='cuda:0', grad_fn=<AddBackward0>) tensor(21516.29492, device='cuda:0', grad_fn=<AddBackward0>), tensor(9231.32812, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.578      0.345      0.349      0.209\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/79      5.79G    0.04456    0.03288    0.02134         30        640: 1\n",
      "tensor([6.85632], device='cuda:0', grad_fn=<AddBackward0>) tensor(19182.41992, device='cuda:0', grad_fn=<AddBackward0>), tensor(8499.68359, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.585      0.345      0.353      0.214\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/79      5.79G    0.04434    0.03226    0.02074         31        640: 1\n",
      "tensor([6.64975], device='cuda:0', grad_fn=<AddBackward0>) tensor(18830.02539, device='cuda:0', grad_fn=<AddBackward0>), tensor(8464.30273, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.643      0.357      0.354      0.217\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/79      5.79G    0.04458    0.03281    0.02082         16        640: 1\n",
      "tensor([7.29417], device='cuda:0', grad_fn=<AddBackward0>) tensor(20168.22656, device='cuda:0', grad_fn=<AddBackward0>), tensor(9331.78809, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.535      0.352      0.358      0.223\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/79      5.79G    0.04407    0.03255    0.02066         39        640: 1\n",
      "tensor([7.09446], device='cuda:0', grad_fn=<AddBackward0>) tensor(17959.53711, device='cuda:0', grad_fn=<AddBackward0>), tensor(9067.28125, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.589      0.357      0.349       0.21\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/79      5.79G    0.04355    0.03228    0.02056         45        640: 1\n",
      "tensor([6.90389], device='cuda:0', grad_fn=<AddBackward0>) tensor(17713.39062, device='cuda:0', grad_fn=<AddBackward0>), tensor(8784.16016, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.591      0.331       0.35      0.211\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/79      5.79G     0.0437    0.03262    0.02113         50        640: 1\n",
      "tensor([7.20204], device='cuda:0', grad_fn=<AddBackward0>) tensor(18932.22852, device='cuda:0', grad_fn=<AddBackward0>), tensor(8905.17969, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.533      0.346      0.349      0.211\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/79      5.79G    0.04385    0.03197    0.02021         28        640: 1\n",
      "tensor([7.31695], device='cuda:0', grad_fn=<AddBackward0>) tensor(20098.80859, device='cuda:0', grad_fn=<AddBackward0>), tensor(9213., device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.546      0.368      0.354      0.217\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/79      5.79G    0.04292    0.03241    0.02051         36        640: 1\n",
      "tensor([6.74589], device='cuda:0', grad_fn=<AddBackward0>) tensor(18539.91211, device='cuda:0', grad_fn=<AddBackward0>), tensor(8408.84473, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.638      0.354      0.354      0.222\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/79      5.79G    0.04331    0.03195    0.02051         66        640: 1\n",
      "tensor([6.91823], device='cuda:0', grad_fn=<AddBackward0>) tensor(17967.38672, device='cuda:0', grad_fn=<AddBackward0>), tensor(8434.41113, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.518      0.354      0.352      0.212\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      50/79      5.79G     0.0428    0.03186    0.02035         54        640: 1\n",
      "tensor([6.44502], device='cuda:0', grad_fn=<AddBackward0>) tensor(16926.45312, device='cuda:0', grad_fn=<AddBackward0>), tensor(7892.21582, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.637      0.359      0.356      0.213\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      51/79      5.79G    0.04305    0.03199    0.01991         41        640: 1\n",
      "tensor([6.50729], device='cuda:0', grad_fn=<AddBackward0>) tensor(16955.58594, device='cuda:0', grad_fn=<AddBackward0>), tensor(8160.41455, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.573      0.378      0.361      0.219\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      52/79      5.79G    0.04256    0.03148    0.01988         45        640: 1\n",
      "tensor([6.77872], device='cuda:0', grad_fn=<AddBackward0>) tensor(18700.85742, device='cuda:0', grad_fn=<AddBackward0>), tensor(8331.71191, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.567      0.367       0.36      0.221\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      53/79      5.79G    0.04237     0.0317    0.01978         67        640: 1\n",
      "tensor([6.85491], device='cuda:0', grad_fn=<AddBackward0>) tensor(18504.11719, device='cuda:0', grad_fn=<AddBackward0>), tensor(8278.93945, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.572      0.371      0.359       0.22\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      54/79      5.79G    0.04252    0.03184    0.02023         61        640: 1\n",
      "tensor([6.90560], device='cuda:0', grad_fn=<AddBackward0>) tensor(18271.08984, device='cuda:0', grad_fn=<AddBackward0>), tensor(8586.76562, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.573       0.36      0.356       0.22\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      55/79      5.79G    0.04216    0.03086    0.01953         55        640: 1\n",
      "tensor([6.73760], device='cuda:0', grad_fn=<AddBackward0>) tensor(19279.67773, device='cuda:0', grad_fn=<AddBackward0>), tensor(7785.24756, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.535      0.384      0.359      0.225\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      56/79      5.79G    0.04228    0.03113    0.01948         34        640: 1\n",
      "tensor([6.40774], device='cuda:0', grad_fn=<AddBackward0>) tensor(17524.69141, device='cuda:0', grad_fn=<AddBackward0>), tensor(7909.92090, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.542      0.373      0.362      0.225\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      57/79      5.79G    0.04203    0.03135    0.01925         36        640: 1\n",
      "tensor([7.39846], device='cuda:0', grad_fn=<AddBackward0>) tensor(19927.35547, device='cuda:0', grad_fn=<AddBackward0>), tensor(9289.33887, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.556      0.382      0.365      0.224\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      58/79      5.79G    0.04194    0.03161    0.01941         52        640: 1\n",
      "tensor([6.96691], device='cuda:0', grad_fn=<AddBackward0>) tensor(19397.47266, device='cuda:0', grad_fn=<AddBackward0>), tensor(8340.25195, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.48      0.382      0.355      0.218\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      59/79      5.79G    0.04174    0.03069    0.01914         31        640: 1\n",
      "tensor([7.17763], device='cuda:0', grad_fn=<AddBackward0>) tensor(19945.68164, device='cuda:0', grad_fn=<AddBackward0>), tensor(9196.37598, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.501      0.385      0.359      0.217\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      60/79      5.79G     0.0415    0.03093    0.01933         49        640: 1\n",
      "tensor([6.07562], device='cuda:0', grad_fn=<AddBackward0>) tensor(16814.58594, device='cuda:0', grad_fn=<AddBackward0>), tensor(7263.76953, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.488      0.362      0.353      0.218\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      61/79      5.79G    0.04141    0.03068    0.01909         52        640: 1\n",
      "tensor([6.25810], device='cuda:0', grad_fn=<AddBackward0>) tensor(16762.19141, device='cuda:0', grad_fn=<AddBackward0>), tensor(7494.97900, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.467      0.371      0.354       0.22\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      62/79      5.79G    0.04169    0.03098    0.01919         41        640: 1\n",
      "tensor([6.29416], device='cuda:0', grad_fn=<AddBackward0>) tensor(18293.18945, device='cuda:0', grad_fn=<AddBackward0>), tensor(7171.23633, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.556      0.374      0.363      0.223\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      63/79      5.79G    0.04123     0.0305     0.0191         34        640: 1\n",
      "tensor([6.55241], device='cuda:0', grad_fn=<AddBackward0>) tensor(17611.12695, device='cuda:0', grad_fn=<AddBackward0>), tensor(8318.78320, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.521      0.367      0.356       0.22\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      64/79      5.79G    0.04093    0.03021    0.01927         37        640: 1\n",
      "tensor([6.29829], device='cuda:0', grad_fn=<AddBackward0>) tensor(17800.06250, device='cuda:0', grad_fn=<AddBackward0>), tensor(7550.20605, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.597      0.379       0.36      0.225\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      65/79      5.79G    0.04108     0.0305    0.01868         33        640: 1\n",
      "tensor([6.35568], device='cuda:0', grad_fn=<AddBackward0>) tensor(18218.07812, device='cuda:0', grad_fn=<AddBackward0>), tensor(7466.64014, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.531      0.392       0.36       0.22\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      66/79      5.79G    0.04117     0.0305    0.01889         20        640: 1\n",
      "tensor([7.05755], device='cuda:0', grad_fn=<AddBackward0>) tensor(21005.27930, device='cuda:0', grad_fn=<AddBackward0>), tensor(8389.67480, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.551      0.375      0.354       0.22\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      67/79      5.79G     0.0405    0.03085    0.01898         49        640: 1\n",
      "tensor([6.55062], device='cuda:0', grad_fn=<AddBackward0>) tensor(17584.87305, device='cuda:0', grad_fn=<AddBackward0>), tensor(7948.89990, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.539      0.379      0.357      0.225\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      68/79      5.79G    0.04032    0.03046    0.01876         93        640: 1\n",
      "tensor([6.36613], device='cuda:0', grad_fn=<AddBackward0>) tensor(17050.48438, device='cuda:0', grad_fn=<AddBackward0>), tensor(7316.47852, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.577      0.378       0.36      0.227\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      69/79      5.79G    0.04055     0.0305     0.0186         32        640: 1\n",
      "tensor([6.31021], device='cuda:0', grad_fn=<AddBackward0>) tensor(16986.73828, device='cuda:0', grad_fn=<AddBackward0>), tensor(7884.47363, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.528      0.375      0.358      0.223\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      70/79      5.79G    0.04043    0.03043     0.0188         47        640: 1\n",
      "tensor([6.67783], device='cuda:0', grad_fn=<AddBackward0>) tensor(17423.76172, device='cuda:0', grad_fn=<AddBackward0>), tensor(8177.99365, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.551      0.392      0.363      0.228\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      71/79      5.79G    0.04025    0.03081     0.0182         43        640: 1\n",
      "tensor([6.47236], device='cuda:0', grad_fn=<AddBackward0>) tensor(17579.96094, device='cuda:0', grad_fn=<AddBackward0>), tensor(8114.45703, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.602      0.383       0.36      0.223\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      72/79      5.79G    0.04029    0.02987    0.01823         37        640: 1\n",
      "tensor([6.44987], device='cuda:0', grad_fn=<AddBackward0>) tensor(18573.56055, device='cuda:0', grad_fn=<AddBackward0>), tensor(7771.30078, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.531       0.39      0.356      0.219\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      73/79      5.79G     0.0401    0.03026    0.01826         53        640: 1\n",
      "tensor([6.30933], device='cuda:0', grad_fn=<AddBackward0>) tensor(18035.52734, device='cuda:0', grad_fn=<AddBackward0>), tensor(7430.77637, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.535      0.392      0.361      0.225\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      74/79      5.79G    0.04024    0.03062    0.01843         59        640: 1\n",
      "tensor([6.66788], device='cuda:0', grad_fn=<AddBackward0>) tensor(18522.67383, device='cuda:0', grad_fn=<AddBackward0>), tensor(7807.86572, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.53      0.399      0.361      0.226\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      75/79      5.79G    0.03951    0.02965    0.01848         26        640: 1\n",
      "tensor([6.52096], device='cuda:0', grad_fn=<AddBackward0>) tensor(17735.81055, device='cuda:0', grad_fn=<AddBackward0>), tensor(8425.74414, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.533       0.39       0.36      0.225\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      76/79      5.79G    0.04008    0.03002      0.018         54        640: 1\n",
      "tensor([6.43324], device='cuda:0', grad_fn=<AddBackward0>) tensor(17989.71875, device='cuda:0', grad_fn=<AddBackward0>), tensor(7686.10254, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.53      0.376      0.366      0.227\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      77/79      5.79G    0.03948    0.02953    0.01804         31        640: 1\n",
      "tensor([6.70207], device='cuda:0', grad_fn=<AddBackward0>) tensor(18457.27344, device='cuda:0', grad_fn=<AddBackward0>), tensor(8229.42676, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.545      0.388      0.365      0.224\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      78/79      5.79G    0.03959       0.03    0.01769         56        640: 1\n",
      "tensor([6.71508], device='cuda:0', grad_fn=<AddBackward0>) tensor(18958.05859, device='cuda:0', grad_fn=<AddBackward0>), tensor(7998.46826, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.496      0.393      0.362      0.227\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      79/79      5.79G    0.03938     0.0297    0.01795         45        640: 1\n",
      "tensor([6.01241], device='cuda:0', grad_fn=<AddBackward0>) tensor(17687.56641, device='cuda:0', grad_fn=<AddBackward0>), tensor(7201.31348, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.539      0.387      0.359      0.226\n",
      "\n",
      "80 epochs completed in 1.696 hours.\n",
      "Optimizer stripped from runs/train/k_v_2oldmodels_openimages6/weights/last.pt, 14.6MB\n",
      "Optimizer stripped from runs/train/k_v_2oldmodels_openimages6/weights/best.pt, 14.6MB\n",
      "\n",
      "Validating runs/train/k_v_2oldmodels_openimages6/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.551      0.392      0.363      0.228\n",
      "                   car       1200        287      0.607      0.571      0.534      0.346\n",
      "                   van       1200         29      0.436      0.414       0.32      0.226\n",
      "                 truck       1200         29      0.244      0.379       0.25      0.145\n",
      "                person       1200       2264      0.415      0.353      0.284      0.136\n",
      "               bicycle       1200         54      0.517      0.481      0.393      0.209\n",
      "                  bird       1200        136      0.625      0.601      0.544      0.307\n",
      "                  boat       1200        145      0.506      0.421      0.442      0.203\n",
      "                bottle       1200         31          0          0   0.000534   0.000265\n",
      "                   bus       1200         15      0.507        0.8      0.754      0.505\n",
      "                   cat       1200          1          1          0    0.00829    0.00415\n",
      "                 chair       1200         21      0.165      0.238     0.0788     0.0255\n",
      "                   dog       1200         42      0.719       0.55      0.574      0.364\n",
      "                 horse       1200         44      0.761      0.545      0.611       0.34\n",
      "                 sheep       1200         10      0.442      0.476      0.497      0.311\n",
      "             billboard       1200          4          1          0     0.0046    0.00184\n",
      "                rabbit       1200         11      0.948      0.545      0.635      0.438\n",
      "                monkey       1200         18      0.623      0.944      0.841      0.573\n",
      "                   pig       1200          6      0.691      0.667      0.677      0.551\n",
      "                   toy       1200         64       0.12     0.0625     0.0553     0.0225\n",
      "         traffic light       1200         18          1          0     0.0338     0.0188\n",
      "          traffic sign       1200          4      0.238      0.178     0.0852       0.06\n",
      "Results saved to \u001b[1mruns/train/k_v_2oldmodels_openimages6\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : k_v_2oldmodels_openimages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/22e86818b233466c9158430e81a88fd3\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_f1                    : 0.4985539528195484\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_false_positives       : 24.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5                : 0.392568338996657\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5:.95            : 0.20940951461466817\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_precision             : 0.5168816507842441\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_recall                : 0.48148148148148145\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_support               : 54\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_true_positives        : 26.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_f1                  : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_false_positives     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_mAP@.5              : 0.00459735576923077\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_mAP@.5:.95          : 0.0018389423076923081\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_precision           : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_recall              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_support             : 4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_true_positives      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_f1                       : 0.6129539917223824\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_false_positives          : 49.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_mAP@.5                   : 0.5437809814322851\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_mAP@.5:.95               : 0.3065582319578991\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_precision                : 0.6252473260106848\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_recall                   : 0.6011347481935717\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_support                  : 136\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_true_positives           : 82.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_f1                       : 0.45957382475006214\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_false_positives          : 59.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_mAP@.5                   : 0.4417773142330974\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_mAP@.5:.95               : 0.20262695906784015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_precision                : 0.5063781458225902\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_recall                   : 0.4206896551724138\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_support                  : 145\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_true_positives           : 61.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_f1                     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_false_positives        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_mAP@.5                 : 0.0005344211471470406\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_mAP@.5:.95             : 0.00026516261432250784\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_precision              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_recall                 : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_support                : 31\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_true_positives         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_f1                        : 0.6203303242723229\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_false_positives           : 12.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5                    : 0.7538014727088649\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5:.95                : 0.5047670991810916\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_precision                 : 0.506562846348433\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_recall                    : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_support                   : 15\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_true_positives            : 12.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_f1                        : 0.5886243950796362\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_false_positives           : 106.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5                    : 0.5338061808603223\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5:.95                : 0.34617028581508535\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_precision                 : 0.6068872662923048\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_recall                    : 0.5714285714285714\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_support                   : 287\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_true_positives            : 164.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_f1                        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_false_positives           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_mAP@.5                    : 0.008291666666666666\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_mAP@.5:.95                : 0.004145833333333333\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_precision                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_recall                    : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_support                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_true_positives            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_f1                      : 0.19473126845079153\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_false_positives         : 25.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_mAP@.5                  : 0.07879767696386257\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_mAP@.5:.95              : 0.025454745534748458\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_precision               : 0.16472933359941996\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_recall                  : 0.23809523809523808\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_support                 : 21\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_true_positives          : 5.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_f1                        : 0.6231030824244131\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_false_positives           : 9.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_mAP@.5                    : 0.5741592304084144\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_mAP@.5:.95                : 0.36387881180883397\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_precision                 : 0.7194319402348599\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_recall                    : 0.5495241504973866\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_support                   : 42\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_true_positives            : 23.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_f1                      : 0.6352973290911208\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_false_positives         : 8.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_mAP@.5                  : 0.6107740640587164\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_mAP@.5:.95              : 0.3396297140217374\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_precision               : 0.7605725859340987\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_recall                  : 0.5454545454545454\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_support                 : 44\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_true_positives          : 24.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [2104]                   : (6.376399993896484, 25.26181411743164)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [160]         : (0.06802127685991662, 0.36606182991281655)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [160]    : (0.03288215995177206, 0.22784108199272643)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [160]       : (0.4665252415040534, 0.8545633458740657)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [160]          : (0.06339234909683947, 0.398715773762856)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_f1                     : 0.7508007699221719\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_false_positives        : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_mAP@.5                 : 0.8406569726569729\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_mAP@.5:.95             : 0.5725926837284535\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_precision              : 0.623053350808545\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_recall                 : 0.9444444444444444\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_support                : 18\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_true_positives         : 17.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_f1                     : 0.3812127530396722\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_false_positives        : 1126.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5                 : 0.28387018867846975\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5:.95             : 0.13592218518849036\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_precision              : 0.41485840008362534\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_recall                 : 0.3526151298530569\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_support                : 2264\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_true_positives         : 798.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_f1                        : 0.6787263836941241\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_false_positives           : 2.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_mAP@.5                    : 0.677225007550589\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_mAP@.5:.95                : 0.5510002746149201\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_precision                 : 0.6912304491198453\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_recall                    : 0.6666666666666666\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_support                   : 6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_true_positives            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_f1                     : 0.692355959811215\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_false_positives        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_mAP@.5                 : 0.6346710120556736\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_mAP@.5:.95             : 0.4381208218209414\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_precision              : 0.9475492120994766\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_recall                 : 0.5454545454545454\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_support                : 11\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_true_positives         : 6.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_f1                      : 0.45833538228809206\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_false_positives         : 6.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_mAP@.5                  : 0.4968181818181818\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_mAP@.5:.95              : 0.3109049808447005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_precision               : 0.4416408787666273\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_recall                  : 0.47634161107215\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_support                 : 10\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_true_positives          : 5.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_f1                        : 0.08212541499771384\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_false_positives           : 29.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_mAP@.5                    : 0.05528798240977453\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_mAP@.5:.95                : 0.02246443012262623\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_precision                 : 0.11971750716848756\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_recall                    : 0.0625\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_support                   : 64\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_true_positives            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_f1              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_false_positives : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_mAP@.5          : 0.033780662393162394\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_mAP@.5:.95      : 0.018848270007770007\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_precision       : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_recall          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_support         : 18\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_true_positives  : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_f1               : 0.20388383296395168\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_false_positives  : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_mAP@.5           : 0.08518326623105182\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_mAP@.5:.95       : 0.060035311200580796\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_precision        : 0.23786447179127698\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_recall           : 0.17839835384345776\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_support          : 4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_true_positives   : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [160]          : (0.039378512650728226, 0.08586543053388596)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [160]          : (0.017689934000372887, 0.062245119363069534)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [160]          : (0.029531778767704964, 0.042536552995443344)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_f1                      : 0.2970451130773615\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_false_positives         : 34.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5                  : 0.24999859944112324\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5:.95              : 0.14538653293006903\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_precision               : 0.24410365276709878\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_recall                  : 0.3793103448275862\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_support                 : 29\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_true_positives          : 11.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [160]            : (0.0539422482252121, 0.06756383925676346)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [160]            : (0.02715795859694481, 0.041648298501968384)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [160]            : (0.023980742320418358, 0.026742633432149887)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_f1                        : 0.42452553540449084\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_false_positives           : 16.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5                    : 0.32000241723366674\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5:.95                : 0.2261444629907277\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_precision                 : 0.4358295195057883\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_recall                    : 0.41379310344827586\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_support                   : 29\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_true_positives            : 12.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [160]                   : (0.00034750000000000026, 0.07011406844106464)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [160]                   : (0.00034750000000000026, 0.009740139416983522)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [160]                   : (0.00034750000000000026, 0.009740139416983522)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : k_v_2oldmodels_openimages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/yolov5/22e86818b233466c9158430e81a88fd3\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_enable          : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_lambda          : [0.0001, 0.0005]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_temperature     : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Old_models          : ['./runs/train/increment_VOC_plain/weights/last.pt', './runs/train/fog_02/weights/last.pt']\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bbox_interval       : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cfg                 : models/yolov5s_openimages.yaml\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.225\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     data                : data/openimages.yaml\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     epochs              : 80\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                : k_v_2oldmodels_openimages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/k_v_2oldmodels_openimages6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weights             : ./runs/train/increment_VOC_plain/weights/last.pt\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (2.34 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     model graph                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n"
     ]
    }
   ],
   "source": [
    "# Continual-learning (LwF) training run: fine-tune YOLOv5s on OpenImages while\n",
    "# distilling from two frozen \"old\" checkpoints (VOC-increment and fog models).\n",
    "# NOTE: backslash-newline inside the triple-quoted string is a Python line\n",
    "# continuation, so the shell receives the whole command on a single line.\n",
    "command = \"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_LwfPro.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_openimages.yaml \\\n",
    "--data data/openimages.yaml \\\n",
    "--epochs 80 \\\n",
    "--weights ./runs/train/increment_VOC_plain/weights/last.pt \\\n",
    "--Lwf_enable \\\n",
    "--Lwf_temperature 1.0 \\\n",
    "--Lwf_lambda \\\n",
    "        1e-4 \\\n",
    "        5e-4 \\\n",
    "--Old_models \\\n",
    "        ./runs/train/increment_VOC_plain/weights/last.pt \\\n",
    "        ./runs/train/fog_02/weights/last.pt \\\n",
    "--name k_v_2oldmodels_openimages\n",
    "\"\"\"\n",
    "!{command}\n",
    "# Wall-clock time of this run: ~43 minutes"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "8c96b75f-2ab6-4c36-8d35-a873c1dccbbf",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/openimages.yaml, weights=['runs/train/k_v_2oldmodels_openimages6/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/test.cache... 600 ima\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        600       1621      0.508       0.29      0.345      0.209\n",
      "                   car        600        113      0.454      0.363      0.321      0.201\n",
      "                   van        600          6          0          0     0.0102    0.00689\n",
      "                 truck        600         17      0.766      0.386      0.513      0.403\n",
      "                person        600       1131      0.487      0.297      0.297      0.145\n",
      "               bicycle        600         43      0.737      0.419      0.518      0.272\n",
      "                  bird        600         61      0.508      0.459      0.407      0.245\n",
      "                  boat        600         82      0.748      0.451      0.494      0.223\n",
      "                bottle        600          1          0          0          0          0\n",
      "                   bus        600          3      0.427      0.333      0.338      0.304\n",
      "                   cat        600          5          1          0      0.508      0.261\n",
      "                 chair        600         12      0.235      0.167      0.232      0.105\n",
      "                   dog        600         25       0.66       0.56      0.605       0.37\n",
      "                 horse        600         37      0.738      0.622      0.697       0.38\n",
      "                 sheep        600          8      0.632      0.625       0.64      0.453\n",
      "                 train        600          2          1          0    0.00315    0.00185\n",
      "             billboard        600          3      0.425      0.333      0.339      0.305\n",
      "                rabbit        600          1          0          0      0.142     0.0995\n",
      "                monkey        600         16      0.483        0.5       0.46      0.228\n",
      "                   pig        600          7      0.329      0.714      0.453      0.335\n",
      "                   toy        600         42      0.556      0.143      0.181     0.0726\n",
      "         traffic light        600          5          1          0      0.236      0.168\n",
      "          traffic sign        600          1          0          0      0.199      0.022\n",
      "Speed: 0.1ms pre-process, 2.7ms inference, 1.1ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp252\u001b[0m\n",
      "openimages\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_VOC.yaml, weights=['runs/train/k_v_2oldmodels_openimages6/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 image\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.497      0.377      0.411      0.221\n",
      "                   car       4952       1201      0.393      0.855      0.751      0.509\n",
      "                person       4952       4528      0.343      0.612      0.438      0.234\n",
      "             aeroplane       4952        285      0.646     0.0351      0.205     0.0896\n",
      "               bicycle       4952        337      0.411      0.712      0.616      0.353\n",
      "                  bird       4952        459      0.408      0.606      0.516       0.27\n",
      "                  boat       4952        263      0.158      0.631      0.358      0.162\n",
      "                bottle       4952        469      0.427      0.148      0.157     0.0734\n",
      "                   bus       4952        213      0.563      0.643      0.612      0.413\n",
      "                   cat       4952        358      0.694      0.095      0.438      0.224\n",
      "                 chair       4952        756      0.352      0.314      0.282      0.132\n",
      "                   cow       4952        244      0.573      0.102      0.335      0.192\n",
      "           diningtable       4952        206        0.7     0.0907      0.283     0.0977\n",
      "                   dog       4952        489      0.464      0.628      0.558      0.289\n",
      "                 horse       4952        348      0.525      0.727       0.69      0.367\n",
      "             motorbike       4952        325      0.851      0.228      0.523      0.246\n",
      "           pottedplant       4952        480      0.218     0.0146     0.0154    0.00535\n",
      "                 sheep       4952        242      0.377      0.645      0.518      0.316\n",
      "                  sofa       4952        239      0.716      0.146      0.342      0.173\n",
      "                 train       4952        282      0.724      0.255       0.48      0.223\n",
      "             tvmonitor       4952        308      0.397     0.0584     0.0947      0.043\n",
      "Speed: 0.1ms pre-process, 1.6ms inference, 1.2ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp254\u001b[0m\n",
      "Voc\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_kitti.yaml, weights=['runs/train/k_v_2oldmodels_openimages6/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198       0.46      0.338      0.318      0.159\n",
      "                   car       2244       8711      0.478      0.778      0.751      0.408\n",
      "                   van       2244        861      0.607      0.423      0.438      0.244\n",
      "                 truck       2244        333      0.221      0.613       0.37      0.229\n",
      "                  tram       2244        138      0.767      0.123      0.258      0.112\n",
      "                person       2244       1286      0.227      0.541      0.368      0.161\n",
      "        person_sitting       2244         89      0.265     0.0649     0.0852     0.0293\n",
      "               cyclist       2244        496      0.467      0.113      0.154     0.0393\n",
      "                  misc       2244        284      0.648     0.0518      0.119     0.0461\n",
      "Speed: 0.0ms pre-process, 1.0ms inference, 1.0ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp256\u001b[0m\n",
      "kitti\n"
     ]
    }
   ],
   "source": [
    "# 1e-4 1e-3\n",
    "model = f'runs/train/k_v_2oldmodels_openimages6/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/openimages.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'openimages' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "\n",
    "# Voc\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/val_VOC.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Voc' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "# kitti\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/val_kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'kitti' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f23535a3-fe4c-4bcf-a5ab-075dc538995e",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
