{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "0444f4a5-8c8b-4ca2-b54e-1d55a999b322",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti_increment.yaml, weights=['runs/train/exp86/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 3868f729 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 160 layers, 7080253 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.934      0.145      0.178      0.093\n",
      "                   car       2244       8711      0.806      0.714      0.771       0.43\n",
      "                   van       2244        861          1          0     0.0499     0.0321\n",
      "                 truck       2244        333          1          0     0.0773      0.049\n",
      "                  tram       2244        138          1          0     0.0296     0.0186\n",
      "                person       2244       1286      0.668      0.445      0.477      0.206\n",
      "        person_sitting       2244         89          1          0   0.000255   0.000127\n",
      "               cyclist       2244        496          1          0    0.00689     0.0021\n",
      "                  misc       2244        284          1          0     0.0112    0.00645\n",
      "Speed: 0.1ms pre-process, 0.9ms inference, 1.6ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp165\u001b[0m\n",
      "Test set val successfully!\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the clear-weather (fog-free) training run exp86 on the KITTI test split.\n",
    "model = 'runs/train/exp86/weights/last.pt'\n",
    "\n",
    "# Build the shell command via implicit string concatenation (safer than\n",
    "# backslash line-continuations inside an f-string); '&&' ensures the\n",
    "# success message only prints when val.py exits cleanly.\n",
    "val_command = (\n",
    "    'python val.py '\n",
    "    '--data data/kitti_increment.yaml '\n",
    "    f'--weights {model} '\n",
    "    '--task test '\n",
    "    \"&& echo 'Test set val successfully!'\"\n",
    ")\n",
    "!{val_command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bf110331-7a8e-4e23-a1de-4e92a7a34c3e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Evaluate the clear-weather (fog-free) training run exp87 on the KITTI test split.\n",
    "model = 'runs/train/exp87/weights/last.pt'\n",
    "\n",
    "# Build the shell command via implicit string concatenation (safer than\n",
    "# backslash line-continuations inside an f-string); '&&' ensures the\n",
    "# success message only prints when val.py exits cleanly.\n",
    "val_command = (\n",
    "    'python val.py '\n",
    "    '--data data/kitti_increment.yaml '\n",
    "    f'--weights {model} '\n",
    "    '--task test '\n",
    "    \"&& echo 'Test set val successfully!'\"\n",
    ")\n",
    "!{val_command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c8f795cd-dd56-4280-938e-172013b5193c",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0858548c-ad24-4c3a-bdda-3a2034f2a2d3",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d285a2ef-b2f4-4fb5-a091-21f269f3268f",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "63506e4c-1e30-440c-94af-c7108a09271b",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "4a5bbf50-3317-4234-bc10-f8dbd04ec03b",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_SI: \u001b[0mweights=./runs/train/exp4/weights/best.pt, cfg=models/yolov5s_kitti.yaml, data=data/kitti.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=60, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2882 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 613f2732 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/2e70946cfeac4e0d8a066f32afaa63d1\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo.Detect                      [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_kitti summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "Transferred 348/349 items from runs/train/exp4/weights/best.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/train.cache... 4189 image\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/val.cache... 1048 images, 0\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.81 anchors/target, 0.999 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/exp12/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/exp12\u001b[0m\n",
      "Starting training for 60 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/59      3.63G    0.06176    0.05453    0.02255        128        640: 1\n",
      "tensor([1.62493], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \u001b[1;38;5;214mCOMET WARNING:\u001b[0m Unknown error retrieving Conda information\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.482      0.265      0.267      0.146\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/59      3.63G    0.05695    0.04653    0.01709        133        640: 1\n",
      "tensor([1.60703], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.528      0.241      0.253      0.132\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/59      3.63G     0.0584    0.04779    0.01747        131        640: 1\n",
      "tensor([1.69487], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.47      0.218      0.204      0.104\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/59      3.63G    0.05838     0.0477    0.01699        108        640: 1\n",
      "tensor([1.45294], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.384      0.168      0.152     0.0735\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/59      3.63G    0.05725    0.04663    0.01584        156        640: 1\n",
      "tensor([1.41924], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.529      0.236      0.245      0.115\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/59      3.63G    0.05544    0.04557    0.01436        123        640: 1\n",
      "tensor([1.33516], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.525      0.257       0.27      0.143\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/59      3.63G    0.05454    0.04474    0.01375        174        640: 1\n",
      "tensor([1.66078], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.543      0.203      0.241       0.12\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/59      3.63G    0.05369    0.04374    0.01279        166        640: 1\n",
      "tensor([1.61688], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.477      0.243      0.247      0.127\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/59      3.63G    0.05322    0.04409     0.0123        152        640: 1\n",
      "tensor([1.41703], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.557      0.265      0.278      0.142\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/59      3.63G    0.05287    0.04394    0.01193        136        640: 1\n",
      "tensor([1.30644], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.555      0.286      0.306       0.15\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/59      3.63G    0.05226    0.04358    0.01126        134        640: 1\n",
      "tensor([1.29315], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.425      0.202      0.202      0.104\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/59      3.63G    0.05069    0.04205    0.01019        112        640: 1\n",
      "tensor([1.37628], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.554      0.297      0.305      0.161\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/59      3.63G    0.05022    0.04228   0.009834        151        640: 1\n",
      "tensor([1.20340], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.543      0.266      0.279      0.145\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/59      3.63G    0.05025    0.04231   0.009696        132        640: 1\n",
      "tensor([1.29414], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.566      0.307      0.329      0.173\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/59      3.63G    0.04948    0.04148   0.009489        131        640: 1\n",
      "tensor([1.32962], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.539      0.286      0.296      0.152\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/59      3.63G    0.04961    0.04162   0.009146        159        640: 1\n",
      "tensor([1.33841], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.741      0.297      0.357      0.185\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/59      3.63G    0.04892    0.04113   0.008882        125        640: 1\n",
      "tensor([1.22932], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.58      0.274      0.308      0.162\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/59      3.63G    0.04877    0.04148   0.008834         88        640: 1\n",
      "tensor([1.11570], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.66      0.278      0.314      0.163\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/59      3.63G    0.04831    0.04033   0.008496        137        640: 1\n",
      "tensor([1.41545], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.556      0.315      0.319      0.164\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/59      3.63G    0.04794    0.04107   0.008039        166        640: 1\n",
      "tensor([1.36250], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.538      0.315      0.318      0.168\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/59      3.63G    0.04751    0.04034   0.007877        161        640: 1\n",
      "tensor([1.29328], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.615      0.291      0.323      0.175\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/59      3.63G    0.04773    0.04032   0.007979        118        640: 1\n",
      "tensor([1.24329], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.626        0.3      0.328      0.166\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/59      3.63G    0.04713    0.04001   0.007743        151        640: 1\n",
      "tensor([1.34605], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.53      0.298      0.308      0.164\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/59      3.63G    0.04697    0.04028   0.007553        133        640: 1\n",
      "tensor([1.15320], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.621      0.275      0.303      0.153\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/59      3.63G    0.04652    0.03976   0.007278        154        640: 1\n",
      "tensor([1.39917], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.691      0.295       0.34      0.177\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/59      3.63G    0.04649    0.03988    0.00716        122        640: 1\n",
      "tensor([1.10175], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.576      0.269      0.308      0.164\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/59      3.63G    0.04592    0.03941   0.006961        123        640: 1\n",
      "tensor([1.06721], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.597        0.3      0.326      0.176\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/59      3.63G     0.0459    0.03905   0.006956        127        640: 1\n",
      "tensor([1.14636], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.663      0.266      0.304      0.166\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/59      3.63G    0.04562    0.03801   0.006655        127        640: 1\n",
      "tensor([1.04751], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.526      0.275      0.297      0.156\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/59      3.63G    0.04532     0.0386   0.006513        122        640: 1\n",
      "tensor([1.15837], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.697      0.302      0.335       0.18\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/59      3.63G    0.04506    0.03881   0.006445        146        640: 1\n",
      "tensor([1.28472], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.577      0.279      0.296       0.16\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/59      3.63G    0.04463    0.03809   0.006246        202        640: 1\n",
      "tensor([1.40758], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.589      0.327      0.351      0.196\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/59      3.63G    0.04486    0.03797   0.006253         94        640: 1\n",
      "tensor([0.89492], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.656      0.276      0.312      0.171\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/59      3.63G    0.04433    0.03786    0.00613        152        640: 1\n",
      "tensor([1.23166], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.614      0.329      0.343      0.187\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/59      3.63G    0.04416    0.03773   0.006116        123        640: 1\n",
      "tensor([1.08793], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.726       0.29      0.339      0.178\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/59      3.63G    0.04386    0.03768    0.00614        162        640: 1\n",
      "tensor([1.06694], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.686      0.285      0.323      0.173\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/59      3.63G    0.04368    0.03789   0.006011        161        640: 1\n",
      "tensor([1.14202], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.568        0.3      0.321      0.171\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/59      3.63G    0.04327    0.03753   0.005638        122        640: 1\n",
      "tensor([1.02414], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.634      0.296      0.324      0.171\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/59      3.63G    0.04354    0.03728   0.005777        126        640: 1\n",
      "tensor([0.96917], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.642        0.3      0.327      0.172\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/59      3.63G    0.04299    0.03716   0.005563         90        640: 1\n",
      "tensor([0.99149], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.714      0.271      0.323      0.171\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/59      3.63G    0.04271    0.03685   0.005472        118        640: 1\n",
      "tensor([1.17274], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.532      0.276      0.294      0.159\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/59      3.63G    0.04275    0.03694   0.005218        157        640: 1\n",
      "tensor([1.15869], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.635      0.248      0.283      0.148\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/59      3.63G    0.04223    0.03667   0.005093        104        640: 1\n",
      "tensor([0.93595], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.636      0.264      0.296      0.148\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/59      3.63G    0.04228    0.03708   0.005117        157        640: 1\n",
      "tensor([1.02304], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.643      0.273      0.305      0.155\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/59      3.63G    0.04179     0.0362   0.004937        108        640: 1\n",
      "tensor([0.94424], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.632      0.265      0.312      0.169\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/59      3.63G    0.04183    0.03615   0.005035        159        640: 1\n",
      "tensor([1.05748], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.665      0.254      0.299      0.155\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/59      3.63G     0.0415    0.03618   0.004885        118        640: 1\n",
      "tensor([1.09211], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.604      0.285      0.307      0.164\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/59      3.63G    0.04129    0.03642   0.004764        176        640: 1\n",
      "tensor([1.22990], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.691      0.279      0.319      0.166\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      50/59      3.63G    0.04123     0.0358   0.004875        130        640: 1\n",
      "tensor([1.08106], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.632      0.258      0.298      0.156\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      51/59      3.63G    0.04071    0.03612   0.004633        178        640: 1\n",
      "tensor([1.21249], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.601      0.302      0.321      0.178\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      52/59      3.63G    0.04069    0.03556   0.004641        148        640: 1\n",
      "tensor([0.97978], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.689      0.267      0.315       0.17\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      53/59      3.63G    0.04053    0.03531   0.004538        115        640: 1\n",
      "tensor([1.00085], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.655      0.287      0.327      0.177\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      54/59      3.63G    0.04019    0.03505   0.004458        124        640: 1\n",
      "tensor([0.98522], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.659      0.284      0.321      0.171\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      55/59      3.63G    0.04021    0.03484   0.004533        163        640: 1\n",
      "tensor([1.05231], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.652      0.282      0.313      0.167\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      56/59      3.63G    0.03997    0.03512   0.004356        200        640: 1\n",
      "tensor([1.17552], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.621      0.283      0.315       0.17\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      57/59      3.63G    0.03973    0.03503   0.004319        141        640: 1\n",
      "tensor([0.98173], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.644      0.281      0.314      0.168\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      58/59      3.63G    0.03971    0.03483   0.004396        146        640: 1\n",
      "tensor([0.91303], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.681      0.276      0.316      0.171\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      59/59      3.63G     0.0398    0.03489   0.004314        168        640: 1\n",
      "tensor([1.08098], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.674      0.277      0.316      0.172\n",
      "\n",
      "60 epochs completed in 0.794 hours.\n",
      "Optimizer stripped from runs/train/exp12/weights/last.pt, 14.3MB\n",
      "Optimizer stripped from runs/train/exp12/weights/best.pt, 14.3MB\n",
      "\n",
      "Validating runs/train/exp12/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.589      0.327      0.351      0.196\n",
      "                   Car       1048       4012      0.808      0.547      0.645      0.407\n",
      "                   Van       1048        431      0.547      0.341      0.374      0.254\n",
      "                 Truck       1048        166      0.724      0.206       0.27      0.173\n",
      "                  Tram       1048         56      0.638      0.196       0.22      0.105\n",
      "            Pedestrian       1048        618      0.662      0.424      0.463       0.21\n",
      "        Person_sitting       1048         20       0.31       0.45      0.356      0.177\n",
      "               Cyclist       1048        234      0.532      0.274      0.271      0.138\n",
      "                  Misc       1048        138      0.488      0.181      0.207      0.108\n",
      "Results saved to \u001b[1mruns/train/exp12\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/2e70946cfeac4e0d8a066f32afaa63d1\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_f1                         : 0.6522865718088325\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_false_positives            : 521.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_mAP@.5                     : 0.6448084789054898\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_mAP@.5:.95                 : 0.4068268120738187\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_precision                  : 0.8080719973705994\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_recall                     : 0.5468594217347956\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_support                    : 4012\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_true_positives             : 2194.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_f1                     : 0.3612871192036532\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_false_positives        : 56.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_mAP@.5                 : 0.2708367471817118\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_mAP@.5:.95             : 0.1383140236387322\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_precision              : 0.5320526135953133\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_recall                 : 0.27350427350427353\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_support                : 234\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_true_positives         : 64.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_f1                        : 0.2642629505982419\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_false_positives           : 26.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_mAP@.5                    : 0.2067889602873298\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_mAP@.5:.95                : 0.1084471256839092\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_precision                 : 0.4882289370226702\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_recall                    : 0.18115942028985507\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_support                   : 138\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_true_positives            : 25.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_f1                  : 0.5167459822812661\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_false_positives     : 134.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_mAP@.5              : 0.4628142889965313\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_mAP@.5:.95          : 0.20951041491034333\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_precision           : 0.6615528809390178\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_recall              : 0.42394822006472493\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_support             : 618\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_true_positives      : 262.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_f1              : 0.3669747293156782\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_false_positives : 20.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_mAP@.5          : 0.3556677791075489\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_mAP@.5:.95      : 0.1766732123387978\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_precision       : 0.30981388176970076\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_recall          : 0.45\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_support         : 20\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_true_positives  : 9.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_f1                        : 0.30034220921256194\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_false_positives           : 6.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_mAP@.5                    : 0.22023134254775834\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_mAP@.5:.95                : 0.10459402347506722\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_precision                 : 0.637689384526521\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_recall                    : 0.19642857142857142\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_support                   : 56\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_true_positives            : 11.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_f1                       : 0.32016872829313153\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_false_positives          : 13.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_mAP@.5                   : 0.27002957009188355\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_mAP@.5:.95               : 0.17260455300227023\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_precision                : 0.7240772969939636\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_recall                   : 0.20552288323372658\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_support                  : 166\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_true_positives           : 34.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_f1                         : 0.4200609732941449\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_false_positives            : 122.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_mAP@.5                     : 0.37449560997345815\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_mAP@.5:.95                 : 0.2537178837337\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_precision                  : 0.5466748930031957\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_recall                     : 0.34106728538283065\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_support                    : 431\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_true_positives             : 147.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [1566]                    : (1.0142959356307983, 3.6865339279174805)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [120]          : (0.1516308401134992, 0.3571165890549326)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [120]     : (0.07347164407515708, 0.1961312175572013)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [120]        : (0.3838219360542817, 0.7407277712503814)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [120]           : (0.1682546340034387, 0.32862195558075574)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [120]           : (0.039707254618406296, 0.06176451966166496)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [120]           : (0.004314018413424492, 0.022548111155629158)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [120]           : (0.0348280668258667, 0.05453416332602501)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [120]             : (0.0483892448246479, 0.06865653395652771)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [120]             : (0.015410058200359344, 0.026303516700863838)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [120]             : (0.07280407845973969, 0.10944955796003342)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [120]                    : (0.00043, 0.07011450381679389)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [120]                    : (0.00043, 0.009657697201017812)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [120]                    : (0.00043, 0.009657697201017812)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/yolov5/2e70946cfeac4e0d8a066f32afaa63d1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.05000000000000001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/exp12\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (1.91 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     model graph                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n"
     ]
    }
   ],
   "source": [
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_SI.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_kitti.yaml \\\n",
    "--data data/kitti.yaml \\\n",
    "--epochs 60 \\\n",
    "--weights ./runs/train/exp4/weights/best.pt \\\n",
    "\"\"\"\n",
    "!{command}\n",
    "# --weights ./runs/train/exp3/weights/best.pt \\\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "aebe08ec-021d-4b24-bab4-28122765d400",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti.yaml, weights=['runs/train/exp12/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 613f2732 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198       0.56      0.266      0.294      0.161\n",
      "                   Car       2244       8711      0.782      0.445       0.55      0.345\n",
      "                   Van       2244        861      0.564      0.256      0.289      0.193\n",
      "                 Truck       2244        333      0.332      0.195      0.195      0.119\n",
      "                  Tram       2244        138      0.676     0.0725      0.108     0.0388\n",
      "            Pedestrian       2244       1286      0.639        0.4      0.448      0.219\n",
      "        Person_sitting       2244         89      0.298      0.371      0.295      0.136\n",
      "               Cyclist       2244        496      0.723      0.222      0.277      0.145\n",
      "                  Misc       2244        284      0.462      0.169      0.189     0.0902\n",
      "Speed: 0.1ms pre-process, 1.0ms inference, 1.3ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp114\u001b[0m\n",
      "Test set val successfully!\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the final (last.pt) checkpoint of exp12 on the KITTI test split.\n",
    "# f-prefix removed from the plain path literal (no placeholders);\n",
    "# val_command keeps its f-string because it interpolates {model}.\n",
    "model = 'runs/train/exp12/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "# Note: this run was trained without EWC regularization.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b7c9e051-cd43-4f09-8b66-757b45477588",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "6b1d50a0-9ba5-4067-ac6b-e48764a21e69",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_SI: \u001b[0mweights=./runs/train/exp4/weights/best.pt, cfg=models/yolov5s_kitti.yaml, data=data/kitti.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=60, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=10.0, SI_pt=./runs/train/exp4/weights/si.pt\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2882 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 613f2732 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/738010bb643b4e4f97b50fd1fb5aa843\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo.Detect                      [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_kitti summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "Transferred 348/349 items from runs/train/exp4/weights/best.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/train.cache... 4189 image\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/val.cache... 1048 images, 0\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.81 anchors/target, 0.999 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/exp16/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/exp16\u001b[0m\n",
      "Starting training for 60 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/59      3.59G    0.07803    0.07073    0.03011        128        640: 1\n",
      "tensor([33.25062], device='cuda:0', grad_fn=<AddBackward0>) tensor([44.89581], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.28      0.136      0.108     0.0528\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/59      3.59G    0.06598    0.04905    0.02092        133        640: 1\n",
      "tensor([10.80686], device='cuda:0', grad_fn=<AddBackward0>) tensor([19.90158], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.307      0.142      0.131      0.068\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/59      3.59G    0.06054    0.04764    0.01879        131        640: 1\n",
      "tensor([2.78623], device='cuda:0', grad_fn=<AddBackward0>) tensor([4.49849], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.367      0.218      0.185     0.0954\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/59      3.59G    0.05654     0.0464    0.01677        108        640: 1\n",
      "tensor([1.90710], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.69841], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.439       0.24       0.23      0.115\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/59      3.59G    0.05465    0.04491    0.01508        156        640: 1\n",
      "tensor([2.08473], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.73027], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.593      0.275      0.304      0.152\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/59      3.59G    0.05262    0.04362    0.01343        123        640: 1\n",
      "tensor([2.04486], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.69753], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.54      0.342      0.351       0.18\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/59      3.59G    0.05239    0.04327    0.01307        174        640: 1\n",
      "tensor([2.24388], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.67235], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.604      0.311      0.348      0.179\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/59      3.59G    0.05153    0.04229    0.01212        166        640: 1\n",
      "tensor([2.04928], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.65658], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.546      0.368      0.371      0.192\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/59      3.59G    0.05108    0.04267    0.01182        152        640: 1\n",
      "tensor([1.91921], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.63339], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.644      0.342      0.398      0.208\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/59      3.59G    0.05116    0.04266    0.01177        136        640: 1\n",
      "tensor([1.77337], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.61445], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.649      0.347      0.404      0.209\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/59      3.59G    0.05073    0.04254     0.0112        134        640: 1\n",
      "tensor([1.76394], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.59340], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.64      0.357      0.411      0.217\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/59      3.59G    0.05039    0.04199    0.01097        182        640: 1\n",
      "tensor([2.04471], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.58197], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.546      0.382      0.383        0.2\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/59      3.59G    0.05012    0.04224     0.0107        128        640: 1\n",
      "tensor([1.77698], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.56304], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.629      0.369      0.411      0.221\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/59      3.59G    0.04963    0.04134     0.0103        112        640: 1\n",
      "tensor([1.83525], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.54257], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.635      0.339      0.392      0.214\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/59      3.59G    0.04958    0.04197     0.0103        151        640: 1\n",
      "tensor([1.82664], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.52500], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.57      0.359      0.394      0.211\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/59      3.59G    0.04959     0.0419    0.01015        132        640: 1\n",
      "tensor([1.85510], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.50648], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.609      0.387      0.424      0.231\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/59      3.59G    0.04916    0.04121   0.009982        131        640: 1\n",
      "tensor([1.86642], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.48988], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.599      0.417      0.438      0.239\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/59      3.59G    0.04903    0.04127   0.009607        159        640: 1\n",
      "tensor([1.66140], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.47305], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.663      0.357      0.416      0.231\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/59      3.59G    0.04883    0.04103   0.009487        125        640: 1\n",
      "tensor([1.67981], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.45592], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.607       0.36      0.408      0.222\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/59      3.59G    0.04871    0.04149   0.009672         88        640: 1\n",
      "tensor([1.51197], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.44009], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.684      0.351      0.425       0.23\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/59      3.59G    0.04861     0.0405   0.009353        137        640: 1\n",
      "tensor([2.02342], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.42499], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.609      0.334      0.388      0.211\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/59      3.59G    0.04854     0.0414   0.009117        166        640: 1\n",
      "tensor([1.69268], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.40781], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.668      0.315      0.376      0.205\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/59      3.59G    0.04808    0.04084    0.00902        161        640: 1\n",
      "tensor([1.64965], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.39593], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.686      0.405      0.464      0.249\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/59      3.59G    0.04819    0.04048   0.008889        118        640: 1\n",
      "tensor([1.53268], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.38119], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.706      0.402      0.478      0.263\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/59      3.59G    0.04835    0.04076   0.009268        151        640: 1\n",
      "tensor([1.80713], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.36744], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.659      0.347      0.387      0.216\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/59      3.59G    0.04807    0.04104    0.00882        133        640: 1\n",
      "tensor([1.56449], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.35840], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.568      0.326      0.362      0.202\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/59      3.59G    0.04791     0.0406   0.008718        154        640: 1\n",
      "tensor([1.86477], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.34173], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.645      0.348      0.394      0.214\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/59      3.59G    0.04792    0.04074   0.008654        122        640: 1\n",
      "tensor([1.46170], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.32903], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.615      0.343      0.384      0.203\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/59      3.59G     0.0474    0.04046   0.008228        123        640: 1\n",
      "tensor([1.35416], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.31506], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.653      0.344      0.407      0.224\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/59      3.59G    0.04752    0.04028   0.008458        127        640: 1\n",
      "tensor([1.52612], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.30169], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.612      0.331      0.383      0.203\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/59      3.59G    0.04794    0.03953   0.008605        127        640: 1\n",
      "tensor([1.42861], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.28525], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.554      0.315      0.346      0.192\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/59      3.59G     0.0474    0.04004   0.008388        122        640: 1\n",
      "tensor([1.53009], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.28096], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.598        0.4      0.424      0.228\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/59      3.59G    0.04723    0.04035    0.00823        146        640: 1\n",
      "tensor([1.63810], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.26668], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.642      0.381      0.426      0.225\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/59      3.59G    0.04698    0.03972   0.008004        202        640: 1\n",
      "tensor([1.75343], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.25263], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.707       0.36      0.441      0.243\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/59      3.59G    0.04739    0.03975   0.008053         94        640: 1\n",
      "tensor([1.24446], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.24431], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.658      0.362      0.407      0.221\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/59      3.59G    0.04705    0.03977   0.007919        152        640: 1\n",
      "tensor([1.50629], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.23292], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.63      0.351      0.397      0.216\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/59      3.59G      0.047    0.03958   0.008062        123        640: 1\n",
      "tensor([1.37372], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.22500], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.655      0.352      0.403      0.218\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/59      3.59G    0.04692    0.03972   0.008137        162        640: 1\n",
      "tensor([1.37438], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.21377], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.608      0.398      0.433      0.233\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/59      3.59G    0.04674    0.03999    0.00791        161        640: 1\n",
      "tensor([1.44598], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.20481], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.548      0.435      0.436      0.232\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/59      3.59G    0.04657    0.03987   0.007621        122        640: 1\n",
      "tensor([1.31869], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.19579], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.63       0.41      0.443      0.234\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/59      3.59G    0.04698     0.0397   0.007967        126        640: 1\n",
      "tensor([1.18899], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.18545], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.661      0.348      0.417      0.221\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/59      3.59G     0.0465    0.03963   0.007646         90        640: 1\n",
      "tensor([1.23253], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.17841], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.694       0.35      0.422       0.23\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/59      3.59G    0.04649    0.03948   0.007658        118        640: 1\n",
      "tensor([1.45929], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.17098], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.639      0.364        0.4      0.215\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/59      3.59G    0.04653    0.03958   0.007387        157        640: 1\n",
      "tensor([1.43231], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.16348], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.621      0.381      0.407      0.219\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/59      3.59G    0.04634    0.03951   0.007256        104        640: 1\n",
      "tensor([1.23519], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.15489], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.578      0.407      0.425      0.235\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/59      3.59G    0.04648    0.04004   0.007337        157        640: 1\n",
      "tensor([1.28987], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.15022], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.625      0.334      0.385      0.202\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/59      3.59G    0.04605    0.03926   0.007179        108        640: 1\n",
      "tensor([1.17813], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.14405], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.651      0.349      0.401      0.219\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/59      3.59G    0.04637    0.03934   0.007337        159        640: 1\n",
      "tensor([1.31771], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.13643], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.669      0.372      0.425      0.225\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/59      3.59G    0.04623    0.03948   0.007239        118        640: 1\n",
      "tensor([1.35953], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.13278], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.654      0.331      0.377      0.199\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/59      3.59G    0.04596    0.03977   0.007081        176        640: 1\n",
      "tensor([1.51424], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.12803], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.634      0.376      0.411      0.224\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      50/59      3.59G    0.04618    0.03928   0.007331        130        640: 1\n",
      "tensor([1.36754], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.12220], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.662       0.36      0.406      0.217\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      51/59      3.59G    0.04574    0.03982   0.006963        178        640: 1\n",
      "tensor([1.49222], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.11969], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.582      0.397      0.421      0.233\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      52/59      3.59G    0.04577    0.03914    0.00707        148        640: 1\n",
      "tensor([1.24475], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.11600], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.663      0.337       0.39      0.219\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      53/59      3.59G    0.04559    0.03899   0.006865        115        640: 1\n",
      "tensor([1.28441], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.11528], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.598      0.393       0.43      0.239\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      54/59      3.59G    0.04517     0.0387   0.006702        124        640: 1\n",
      "tensor([1.25244], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.11503], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.676      0.371      0.431      0.231\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      55/59      3.59G    0.04526    0.03841   0.006733        163        640: 1\n",
      "tensor([1.26320], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.11434], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.714      0.348      0.421      0.235\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      56/59      3.59G    0.04488    0.03866   0.006461        200        640: 1\n",
      "tensor([1.40045], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.11354], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.625      0.364       0.39      0.217\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      57/59      3.59G    0.04456    0.03859   0.006377        141        640: 1\n",
      "tensor([1.21359], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.11272], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.613      0.407      0.441       0.24\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      58/59      3.59G    0.04457    0.03833   0.006385        146        640: 1\n",
      "tensor([1.16480], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.11176], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.679      0.369      0.419      0.234\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      59/59      3.59G    0.04451     0.0382   0.006193        168        640: 1\n",
      "tensor([1.34828], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.11108], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.615      0.386      0.414      0.229\n",
      "\n",
      "60 epochs completed in 1.279 hours.\n",
      "Optimizer stripped from runs/train/exp16/weights/last.pt, 14.3MB\n",
      "Optimizer stripped from runs/train/exp16/weights/best.pt, 14.3MB\n",
      "\n",
      "Validating runs/train/exp16/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.706      0.402      0.478      0.263\n",
      "                   Car       1048       4012      0.834      0.707      0.799      0.518\n",
      "                   Van       1048        431      0.583      0.614      0.603      0.384\n",
      "                 Truck       1048        166       0.75       0.38      0.522      0.308\n",
      "                  Tram       1048         56      0.841      0.232      0.395       0.17\n",
      "            Pedestrian       1048        618       0.73      0.415      0.485      0.226\n",
      "        Person_sitting       1048         20      0.563       0.45      0.434      0.203\n",
      "               Cyclist       1048        234      0.693      0.164      0.249      0.106\n",
      "                  Misc       1048        138      0.656      0.254      0.338      0.188\n",
      "Results saved to \u001b[1mruns/train/exp16\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/738010bb643b4e4f97b50fd1fb5aa843\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_f1                         : 0.7653625022777072\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_false_positives            : 566.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_mAP@.5                     : 0.7990323443718613\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_mAP@.5:.95                 : 0.5184344301743232\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_precision                  : 0.8337221925855788\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_recall                     : 0.7073633952995867\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_support                    : 4012\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_true_positives             : 2838.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_f1                     : 0.2647145347879978\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_false_positives        : 17.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_mAP@.5                 : 0.24935045989453833\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_mAP@.5:.95             : 0.10586811799158066\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_precision              : 0.6925087925087925\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_recall                 : 0.16363174482832601\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_support                : 234\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_true_positives         : 38.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_f1                        : 0.3657819821147098\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_false_positives           : 18.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_mAP@.5                    : 0.33789333405775673\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_mAP@.5:.95                : 0.18809140953441744\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_precision                 : 0.6557889903258333\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_recall                    : 0.2536231884057971\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_support                   : 138\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_true_positives            : 35.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_f1                  : 0.5292865299106017\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_false_positives     : 95.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_mAP@.5              : 0.4849859664153329\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_mAP@.5:.95          : 0.22591668318118913\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_precision           : 0.7298041671261044\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_recall              : 0.415206344008933\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_support             : 618\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_true_positives      : 257.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_f1              : 0.5002911959465435\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_false_positives : 7.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_mAP@.5          : 0.4340699888161548\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_mAP@.5:.95      : 0.20255410184074624\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_precision       : 0.5632376267244689\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_recall          : 0.45\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_support         : 20\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_true_positives  : 9.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_f1                        : 0.3638262869525895\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_false_positives           : 2.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_mAP@.5                    : 0.3945367030587412\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_mAP@.5:.95                : 0.17031588946980886\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_precision                 : 0.8407341749697792\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_recall                    : 0.23214285714285715\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_support                   : 56\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_true_positives            : 13.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_f1                       : 0.5040069677641195\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_false_positives          : 21.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_mAP@.5                   : 0.5215452610929573\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_mAP@.5:.95               : 0.30833842270586204\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_precision                : 0.7500308600592087\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_recall                   : 0.3795180722891566\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_support                  : 166\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_true_positives           : 63.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_f1                         : 0.5982805490620734\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_false_positives            : 189.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_mAP@.5                     : 0.6028056868161399\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_mAP@.5:.95                 : 0.38372115082594876\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_precision                  : 0.5833585430691361\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_recall                     : 0.6139859890826634\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_support                    : 431\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_true_positives             : 265.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [1566]                    : (1.2636823654174805, 65.24312591552734)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [120]          : (0.10824170155412775, 0.47762988340243984)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [120]     : (0.05281952289485112, 0.2627430264570776)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [120]        : (0.27962899968614097, 0.7141243999045602)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [120]           : (0.13595372332362296, 0.434666619298715)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [120]           : (0.04450955241918564, 0.07802567631006241)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [120]           : (0.0061926585622131824, 0.030111856758594513)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [120]           : (0.03820003196597099, 0.0707305520772934)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [120]             : (0.04020674154162407, 0.06468833237886429)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [120]             : (0.012546643614768982, 0.03220788389444351)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [120]             : (0.06660345941781998, 0.12476649135351181)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [120]                    : (0.00043, 0.07011450381679389)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [120]                    : (0.00043, 0.009657697201017812)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [120]                    : (0.00043, 0.009657697201017812)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/yolov5/738010bb643b4e4f97b50fd1fb5aa843\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.05000000000000001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/exp16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (1.96 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     model graph                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Please wait for assets to finish uploading (timeout is 10800 seconds)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m All assets have been sent, waiting for delivery confirmation\n"
     ]
    }
   ],
   "source": [
    "# Incremental training with Synaptic Intelligence (SI) regularization:\n",
    "# resume from the previous task's best checkpoint and its saved SI state,\n",
    "# logging per-class metrics to Comet.\n",
    "command = \"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_SI.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_kitti.yaml \\\n",
    "--data data/kitti.yaml \\\n",
    "--epochs 60 \\\n",
    "--weights ./runs/train/exp4/weights/best.pt \\\n",
    "--SI_enable 1e1 \\\n",
    "--SI_pt ./runs/train/exp4/weights/si.pt\n",
    "\"\"\"\n",
    "!{command}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "9251816e-1d13-4a41-b3bc-dd8af38ee24d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti.yaml, weights=['runs/train/exp16/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 613f2732 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198       0.61      0.352      0.389      0.205\n",
      "                   Car       2244       8711      0.775      0.648      0.734      0.464\n",
      "                   Van       2244        861      0.565      0.456       0.48      0.292\n",
      "                 Truck       2244        333      0.533      0.426       0.41      0.247\n",
      "                  Tram       2244        138      0.764      0.145      0.249     0.0843\n",
      "            Pedestrian       2244       1286      0.627      0.422      0.465      0.218\n",
      "        Person_sitting       2244         89      0.369       0.36      0.294     0.0991\n",
      "               Cyclist       2244        496      0.711      0.157      0.223      0.103\n",
      "                  Misc       2244        284      0.532      0.201      0.256      0.132\n",
      "Speed: 0.0ms pre-process, 0.8ms inference, 1.0ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp116\u001b[0m\n",
      "Test set val successfully!\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the last checkpoint of this run on the KITTI test split.\n",
    "model = 'runs/train/exp16/weights/last.pt'\n",
    "\n",
    "# String-literal line continuations (backslash-newline) join this into a\n",
    "# single shell command; '&& echo' confirms val.py exited successfully.\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test && \\\n",
    "echo 'Test set val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "# This run is without EWC\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0fda647d-079f-4331-8f78-a027575d47dd",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "53d37a18-3156-426c-98a1-a9606fd48adb",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "da8edfb3-ceb1-4233-8517-7b6df93a4363",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "230d6916-7367-4cfb-a76f-d34ab1bc28ad",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b16c2168-3d68-412c-b16c-65bc00886a25",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a0b5bd8c-2f3e-4321-b0c2-829e8dfc9689",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_SI: \u001b[0mweights=./runs/train/exp4/weights/best.pt, cfg=models/yolov5s_kitti.yaml, data=data/kitti.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=30, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=True, SI_pt=./runs/train/exp4/weights/si.pt\n",
      "Command 'git fetch ultralytics' timed out after 5 seconds\n",
      "YOLOv5 🚀 613f2732 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/9767712834bc49f798bf646d6b2a1a25\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo.Detect                      [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_kitti summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "Transferred 348/349 items from runs/train/exp4/weights/best.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/train.cache... 4189 image\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/val.cache... 1048 images, 0\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.81 anchors/target, 0.999 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/exp9/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/exp9\u001b[0m\n",
      "Starting training for 30 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/29      3.59G    0.06827    0.05486    0.02291        128        640: 1\n",
      "tensor([3.24921], device='cuda:0', grad_fn=<AddBackward0>) tensor([2.61163], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.503      0.227      0.243      0.126\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/29      3.59G    0.06258    0.04641     0.0177        179        640:  fatal: unable to access 'https://github.com/ultralytics/yolov5/': Failed to connect to github.com port 443 after 129412 ms: Connection timed out\n",
      "       1/29      3.59G    0.06192    0.04609    0.01714        133        640: 1\n",
      "tensor([2.64915], device='cuda:0', grad_fn=<AddBackward0>) tensor([1.52819], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.53      0.289       0.28      0.144\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/29      3.59G    0.05956     0.0476    0.01748        131        640: 1\n",
      "tensor([1.78659], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.22381], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.401      0.111     0.0974     0.0466\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/29      3.59G     0.0595    0.04847    0.01792        108        640: 1\n",
      "tensor([1.47084], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.05424], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.377      0.143      0.148     0.0722\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/29      3.59G    0.05774    0.04707    0.01652        156        640: 1\n",
      "tensor([1.52637], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.06469], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.492      0.156      0.152      0.075\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/29      3.59G    0.05645    0.04624    0.01525        123        640: 1\n",
      "tensor([1.42587], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.07016], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.519       0.24      0.249      0.121\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/29      3.59G     0.0553    0.04533    0.01449        174        640: 1\n",
      "tensor([1.72302], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.06901], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.371      0.143      0.137     0.0647\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/29      3.59G    0.05409    0.04412    0.01335        166        640: 1\n",
      "tensor([1.70302], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.06759], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.582       0.26       0.29      0.151\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/29      3.59G    0.05347    0.04448    0.01285        152        640: 1\n",
      "tensor([1.50986], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.06995], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.501      0.201      0.195     0.0932\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/29      3.59G    0.05315    0.04417    0.01256        136        640: 1\n",
      "tensor([1.34964], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.06785], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.571      0.258      0.301      0.144\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/29      3.59G    0.05225    0.04376     0.0117        134        640: 1\n",
      "tensor([1.37616], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.06584], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.49      0.257      0.263      0.135\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/29      3.59G    0.05164    0.04281    0.01113        182        640: 1\n",
      "tensor([1.35597], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.06473], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.604       0.24       0.27      0.134\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/29      3.59G    0.05109    0.04299    0.01077        128        640: 1\n",
      "tensor([1.27830], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.06399], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.584      0.297      0.307      0.166\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/29      3.59G    0.05037    0.04193    0.01032        112        640: 1\n",
      "tensor([1.48703], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.06376], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.606        0.3      0.323       0.17\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/29      3.59G    0.04995    0.04228    0.01003        151        640: 1\n",
      "tensor([1.26860], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.06383], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.491      0.263      0.265      0.136\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/29      3.59G    0.04992    0.04236   0.009882        132        640: 1\n",
      "tensor([1.33673], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.06417], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.596       0.35      0.369      0.197\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/29      3.59G    0.04898    0.04123   0.009409        131        640: 1\n",
      "tensor([1.36654], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.06330], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.612      0.286      0.324       0.17\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/29      3.59G     0.0485    0.04118   0.009027        159        640: 1\n",
      "tensor([1.38302], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.06243], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.653      0.331      0.351       0.19\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/29      3.59G    0.04797    0.04075    0.00859        125        640: 1\n",
      "tensor([1.29842], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.06181], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.65      0.329      0.373      0.195\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/29      3.59G    0.04765    0.04096   0.008628         88        640: 1\n",
      "tensor([1.17853], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.06118], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.589      0.317      0.322      0.168\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/29      3.59G    0.04717     0.0397   0.008168        217        640: 1"
     ]
    }
   ],
   "source": [
    "# Incremental training on KITTI with Synaptic Intelligence (SI) regularization,\n",
    "# resuming from the previous task's checkpoint and its saved SI importance state.\n",
    "# (Alternative old-task checkpoint tried earlier: ./runs/train/exp3/weights/best.pt)\n",
    "args = [\n",
    "    \"env COMET_LOG_PER_CLASS_METRICS=true python train_SI.py\",\n",
    "    \"--img 640\",\n",
    "    \"--bbox_interval 1\",\n",
    "    \"--cfg models/yolov5s_kitti.yaml\",\n",
    "    \"--data data/kitti.yaml\",\n",
    "    \"--epochs 30\",\n",
    "    \"--weights ./runs/train/exp4/weights/best.pt\",\n",
    "    \"--SI_enable\",\n",
    "    \"--SI_pt ./runs/train/exp4/weights/si.pt\",\n",
    "]\n",
    "# Joining an explicit arg list replaces the old triple-quoted string with\n",
    "# trailing-backslash continuations, where a missing space before a backslash\n",
    "# silently merged adjacent arguments (the --SI_pt line had exactly that hazard).\n",
    "command = \" \".join(args)\n",
    "!{command}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4085fc49-0092-4b3d-ac57-55ecacb8eb8b",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "608a6914-d550-4735-b05b-0fef046680ae",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0f91b823-f1ff-44ea-80a0-49fd1dfbdff1",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "626820aa-a910-4c18-9bc6-a9ff7a541d69",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "afaf11b6-ded6-4b15-9377-d7783f33de01",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "954f1595-0224-4008-a597-ae42625054a9",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_LwfPro: \u001b[0mweights=./runs/train/increment_VOC_plain/weights/last.pt, cfg=models/yolov5s_openimages.yaml, data=data/openimages.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=80, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=k_v_2oldmodels_openimages, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=True, Lwf_lambda=0.001, Lwf_temperature=1.0, Old_models=['./runs/train/increment_VOC_plain/weights/last.pt', './runs/train/fog_02/weights/last.pt']\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2895 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/bb996a2ef79644dab98f6c8b6f052f3c\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1    110583  models.yolo.Detect                      [36, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7116727 parameters, 7116727 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/355 items from runs/train/increment_VOC_plain/weights/last.pt\n",
      "Overriding model.yaml nc=36 with nc=26\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83613  models.yolo.Detect                      [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7089757 parameters, 7089757 gradients, 16.2 GFLOPs\n",
      "\n",
      "Overriding model.yaml nc=36 with nc=8\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35067  models.yolo.Detect                      [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7041211 parameters, 7041211 gradients, 16.0 GFLOPs\n",
      "\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 66 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/train.cache... 4200 \u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/val.cache... 1200 imag\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.02 anchors/target, 0.998 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/k_v_2oldmodels_openimages2/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/k_v_2oldmodels_openimages2\u001b[0m\n",
      "Starting training for 80 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/79      3.68G    0.09246    0.04148     0.0559         40        640: 1\n",
      "tensor([27.86511], device='cuda:0', grad_fn=<AddBackward0>) tensor(7001.13477, device='cuda:0', grad_fn=<AddBackward0>), tensor(19631.20508, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.824     0.0576      0.102     0.0449\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/79      5.79G    0.07202    0.04056    0.04308         63        640: 1\n",
      "tensor([25.99399], device='cuda:0', grad_fn=<AddBackward0>) tensor(6574.50000, device='cuda:0', grad_fn=<AddBackward0>), tensor(18025.82227, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.571      0.186      0.194      0.107\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/79      5.79G    0.06435    0.03943    0.04089         57        640: 1\n",
      "tensor([26.14696], device='cuda:0', grad_fn=<AddBackward0>) tensor(5824.33154, device='cuda:0', grad_fn=<AddBackward0>), tensor(19248.64648, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.599      0.209      0.212      0.122\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/79      5.79G    0.06077    0.03891    0.03884         42        640: 1\n",
      "tensor([24.15054], device='cuda:0', grad_fn=<AddBackward0>) tensor(5194.24561, device='cuda:0', grad_fn=<AddBackward0>), tensor(17953.48633, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.61      0.216      0.228      0.133\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/79      5.79G    0.05805    0.03751    0.03768         36        640: 1\n",
      "tensor([23.25276], device='cuda:0', grad_fn=<AddBackward0>) tensor(5746.34814, device='cuda:0', grad_fn=<AddBackward0>), tensor(16349.26172, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.622       0.21      0.239       0.14\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/79      5.79G    0.05658    0.03831    0.03623         39        640: 1\n",
      "tensor([24.41273], device='cuda:0', grad_fn=<AddBackward0>) tensor(6771.28125, device='cuda:0', grad_fn=<AddBackward0>), tensor(16675.79688, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.68      0.211      0.243       0.15\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/79      5.79G    0.05609    0.03776    0.03557         68        640: 1\n",
      "tensor([23.77691], device='cuda:0', grad_fn=<AddBackward0>) tensor(7131.90332, device='cuda:0', grad_fn=<AddBackward0>), tensor(15173.38477, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.653      0.233      0.252      0.158\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/79      5.79G    0.05598    0.03766    0.03602         31        640: 1\n",
      "tensor([23.40357], device='cuda:0', grad_fn=<AddBackward0>) tensor(7712.30176, device='cuda:0', grad_fn=<AddBackward0>), tensor(14678.38672, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.652       0.22      0.264      0.164\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/79      5.79G    0.05535    0.03769    0.03507         35        640: 1\n",
      "tensor([24.56511], device='cuda:0', grad_fn=<AddBackward0>) tensor(7246.59326, device='cuda:0', grad_fn=<AddBackward0>), tensor(16395.88281, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.502      0.262      0.259      0.162\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/79      5.79G    0.05486    0.03762    0.03489         42        640: 1\n",
      "tensor([24.28188], device='cuda:0', grad_fn=<AddBackward0>) tensor(7311.36914, device='cuda:0', grad_fn=<AddBackward0>), tensor(15914.45410, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.535      0.256      0.271      0.168\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/79      5.79G    0.05453    0.03716    0.03428         38        640: 1\n",
      "tensor([23.86443], device='cuda:0', grad_fn=<AddBackward0>) tensor(6532.69922, device='cuda:0', grad_fn=<AddBackward0>), tensor(16439.77734, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.507      0.266      0.273      0.172\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/79      5.79G    0.05411    0.03773    0.03368         59        640: 1\n",
      "tensor([24.60172], device='cuda:0', grad_fn=<AddBackward0>) tensor(7504.57812, device='cuda:0', grad_fn=<AddBackward0>), tensor(15922.06836, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.499      0.274      0.272      0.172\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/79      5.79G    0.05372    0.03741    0.03476         46        640: 1\n",
      "tensor([24.18530], device='cuda:0', grad_fn=<AddBackward0>) tensor(7328.81885, device='cuda:0', grad_fn=<AddBackward0>), tensor(15843.14648, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.517      0.248      0.275      0.177\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/79      5.79G    0.05361     0.0372    0.03356         47        640: 1\n",
      "tensor([24.50324], device='cuda:0', grad_fn=<AddBackward0>) tensor(7123.42090, device='cuda:0', grad_fn=<AddBackward0>), tensor(16297.96484, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.544      0.284      0.283      0.176\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/79      5.79G    0.05326    0.03675    0.03373         32        640: 1\n",
      "tensor([22.94358], device='cuda:0', grad_fn=<AddBackward0>) tensor(7075.76416, device='cuda:0', grad_fn=<AddBackward0>), tensor(15101.28125, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.579      0.285      0.294      0.184\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/79      5.79G    0.05291    0.03763    0.03331         48        640: 1\n",
      "tensor([22.85394], device='cuda:0', grad_fn=<AddBackward0>) tensor(6619.36621, device='cuda:0', grad_fn=<AddBackward0>), tensor(15239.85547, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.444      0.306      0.289      0.183\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/79      5.79G    0.05312    0.03682    0.03306         43        640: 1\n",
      "tensor([21.51756], device='cuda:0', grad_fn=<AddBackward0>) tensor(6605.37061, device='cuda:0', grad_fn=<AddBackward0>), tensor(13918.00488, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.457      0.304      0.277      0.179\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/79      5.79G    0.05321    0.03754    0.03308         64        640: 1\n",
      "tensor([23.29844], device='cuda:0', grad_fn=<AddBackward0>) tensor(7179.35303, device='cuda:0', grad_fn=<AddBackward0>), tensor(15082.74316, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.414      0.331      0.296      0.194\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/79      5.79G    0.05295    0.03741    0.03329         61        640: 1\n",
      "tensor([22.27567], device='cuda:0', grad_fn=<AddBackward0>) tensor(6991.80078, device='cuda:0', grad_fn=<AddBackward0>), tensor(14151.92285, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.457      0.355      0.295      0.189\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/79      5.79G    0.05253    0.03776    0.03312         29        640: 1\n",
      "tensor([22.54631], device='cuda:0', grad_fn=<AddBackward0>) tensor(7524.40967, device='cuda:0', grad_fn=<AddBackward0>), tensor(14036.71094, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.443      0.325      0.285      0.182\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/79      5.79G    0.05197    0.03625    0.03217         39        640: 1\n",
      "tensor([21.82240], device='cuda:0', grad_fn=<AddBackward0>) tensor(6857.56494, device='cuda:0', grad_fn=<AddBackward0>), tensor(13883.98340, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.451      0.333      0.295       0.19\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/79      5.79G    0.05171    0.03696    0.03268         40        640: 1\n",
      "tensor([23.59541], device='cuda:0', grad_fn=<AddBackward0>) tensor(7929.68164, device='cuda:0', grad_fn=<AddBackward0>), tensor(14780.56250, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.419      0.324      0.299      0.195\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/79      5.79G    0.05185    0.03685    0.03294         44        640: 1\n",
      "tensor([21.82933], device='cuda:0', grad_fn=<AddBackward0>) tensor(6686.49365, device='cuda:0', grad_fn=<AddBackward0>), tensor(14056.80762, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.411      0.333      0.314      0.204\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/79      5.79G    0.05174    0.03726    0.03259         31        640: 1\n",
      "tensor([23.47060], device='cuda:0', grad_fn=<AddBackward0>) tensor(7114.29541, device='cuda:0', grad_fn=<AddBackward0>), tensor(15507.97070, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.532      0.315      0.309      0.204\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/79      5.79G    0.05128     0.0367    0.03242         72        640: 1\n",
      "tensor([23.36782], device='cuda:0', grad_fn=<AddBackward0>) tensor(7154.38379, device='cuda:0', grad_fn=<AddBackward0>), tensor(15092.87402, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.482      0.311      0.313        0.2\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/79      5.79G    0.05128    0.03709    0.03208         41        640: 1\n",
      "tensor([23.71852], device='cuda:0', grad_fn=<AddBackward0>) tensor(7932.16602, device='cuda:0', grad_fn=<AddBackward0>), tensor(14953.22363, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.473      0.344      0.299      0.197\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/79      5.79G    0.05172    0.03736    0.03265         33        640: 1\n",
      "tensor([23.06396], device='cuda:0', grad_fn=<AddBackward0>) tensor(7122.22021, device='cuda:0', grad_fn=<AddBackward0>), tensor(14963.95996, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.436      0.318      0.302      0.192\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/79      5.79G    0.05118    0.03682    0.03159         30        640: 1\n",
      "tensor([23.20310], device='cuda:0', grad_fn=<AddBackward0>) tensor(7423.17432, device='cuda:0', grad_fn=<AddBackward0>), tensor(15087.30371, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.449      0.341      0.311        0.2\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/79      5.79G    0.05168    0.03661     0.0319         30        640: 1\n",
      "tensor([22.62237], device='cuda:0', grad_fn=<AddBackward0>) tensor(7397.79004, device='cuda:0', grad_fn=<AddBackward0>), tensor(14434.16992, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.451      0.328      0.309      0.195\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/79      5.79G    0.05087    0.03659    0.03185         43        640: 1\n",
      "tensor([22.49397], device='cuda:0', grad_fn=<AddBackward0>) tensor(7120.05469, device='cuda:0', grad_fn=<AddBackward0>), tensor(14406.74023, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.481      0.342      0.323      0.212\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/79      5.79G    0.05162    0.03671    0.03188         62        640: 1\n",
      "tensor([22.63955], device='cuda:0', grad_fn=<AddBackward0>) tensor(6944.49219, device='cuda:0', grad_fn=<AddBackward0>), tensor(14586.28516, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.486      0.338      0.315      0.201\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/79      5.79G    0.05121    0.03659    0.03181         40        640: 1\n",
      "tensor([22.87240], device='cuda:0', grad_fn=<AddBackward0>) tensor(7200.13965, device='cuda:0', grad_fn=<AddBackward0>), tensor(14820.98340, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.454      0.361      0.335      0.226\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/79      5.79G    0.05086    0.03682    0.03148         37        640: 1\n",
      "tensor([23.40555], device='cuda:0', grad_fn=<AddBackward0>) tensor(7586.68115, device='cuda:0', grad_fn=<AddBackward0>), tensor(14940.72363, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.444      0.343      0.315      0.205\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/79      5.79G    0.05029    0.03611    0.03169         45        640: 1\n",
      "tensor([22.60008], device='cuda:0', grad_fn=<AddBackward0>) tensor(7065.55176, device='cuda:0', grad_fn=<AddBackward0>), tensor(14393.91406, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.517      0.332      0.327      0.212\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/79      5.79G    0.05104    0.03717    0.03181         70        640: 1\n",
      "tensor([22.82777], device='cuda:0', grad_fn=<AddBackward0>) tensor(6868.58447, device='cuda:0', grad_fn=<AddBackward0>), tensor(14980.56934, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.486      0.342       0.33      0.217\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/79      5.79G    0.05057     0.0365    0.03132         34        640: 1\n",
      "tensor([21.74274], device='cuda:0', grad_fn=<AddBackward0>) tensor(6966.64648, device='cuda:0', grad_fn=<AddBackward0>), tensor(13922.91211, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.477      0.342      0.343      0.226\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/79      5.79G    0.05017    0.03621    0.03143         52        640: 1\n",
      "tensor([21.43648], device='cuda:0', grad_fn=<AddBackward0>) tensor(6864.81934, device='cuda:0', grad_fn=<AddBackward0>), tensor(13575.24316, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.553      0.314      0.338      0.221\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/79      5.79G    0.05023    0.03651    0.03148         46        640: 1\n",
      "tensor([21.43563], device='cuda:0', grad_fn=<AddBackward0>) tensor(6782.41406, device='cuda:0', grad_fn=<AddBackward0>), tensor(13766.55957, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.464       0.33      0.327      0.215\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/79      5.79G     0.0499    0.03603    0.03108         42        640: 1\n",
      "tensor([22.33844], device='cuda:0', grad_fn=<AddBackward0>) tensor(6941.78857, device='cuda:0', grad_fn=<AddBackward0>), tensor(14401.49414, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.463       0.35       0.33       0.22\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/79      5.79G    0.05027    0.03632     0.0313         56        640: 1\n",
      "tensor([20.30964], device='cuda:0', grad_fn=<AddBackward0>) tensor(6359.87207, device='cuda:0', grad_fn=<AddBackward0>), tensor(12942.94531, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.477      0.337      0.329       0.22\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/79      5.79G    0.05013    0.03642    0.03135         47        640: 1\n",
      "tensor([23.34664], device='cuda:0', grad_fn=<AddBackward0>) tensor(7700.31006, device='cuda:0', grad_fn=<AddBackward0>), tensor(14462.40723, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.484      0.333      0.333      0.221\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/79      5.79G    0.05018    0.03668    0.03117         30        640: 1\n",
      "tensor([21.58003], device='cuda:0', grad_fn=<AddBackward0>) tensor(6896.45312, device='cuda:0', grad_fn=<AddBackward0>), tensor(13855.56055, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.498      0.341      0.336      0.219\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/79      5.79G    0.04995    0.03653     0.0309         31        640: 1\n",
      "tensor([21.49051], device='cuda:0', grad_fn=<AddBackward0>) tensor(7153.54932, device='cuda:0', grad_fn=<AddBackward0>), tensor(13656.28711, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.491      0.338      0.349      0.235\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/79      5.79G    0.05034    0.03698    0.03063         16        640: 1\n",
      "tensor([22.75265], device='cuda:0', grad_fn=<AddBackward0>) tensor(6999.64600, device='cuda:0', grad_fn=<AddBackward0>), tensor(15090.32422, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.494      0.339      0.351      0.238\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/79      5.79G    0.04998    0.03701    0.03098         39        640: 1\n",
      "tensor([22.19896], device='cuda:0', grad_fn=<AddBackward0>) tensor(6398.54736, device='cuda:0', grad_fn=<AddBackward0>), tensor(14894.95898, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.493      0.327      0.337      0.227\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/79      5.79G    0.04934    0.03657     0.0309         45        640: 1\n",
      "tensor([21.40305], device='cuda:0', grad_fn=<AddBackward0>) tensor(6225.19092, device='cuda:0', grad_fn=<AddBackward0>), tensor(14333.74902, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.499      0.359      0.334      0.223\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/79      5.79G    0.04963    0.03689    0.03157         50        640: 1\n",
      "tensor([21.85599], device='cuda:0', grad_fn=<AddBackward0>) tensor(6546.07422, device='cuda:0', grad_fn=<AddBackward0>), tensor(14262.76562, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.499      0.346      0.333      0.223\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/79      5.79G    0.04997    0.03635    0.03045         28        640: 1\n",
      "tensor([22.59113], device='cuda:0', grad_fn=<AddBackward0>) tensor(6885.84326, device='cuda:0', grad_fn=<AddBackward0>), tensor(14896.04297, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.521       0.35      0.352      0.236\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/79      5.79G    0.04929    0.03712    0.03123         36        640: 1\n",
      "tensor([21.39236], device='cuda:0', grad_fn=<AddBackward0>) tensor(6579.19727, device='cuda:0', grad_fn=<AddBackward0>), tensor(13987.63184, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.553      0.319      0.344      0.232\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/79      5.79G     0.0495    0.03652     0.0312         66        640: 1\n",
      "tensor([21.60418], device='cuda:0', grad_fn=<AddBackward0>) tensor(6781.36279, device='cuda:0', grad_fn=<AddBackward0>), tensor(13750.55762, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.529      0.307      0.334      0.223\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      50/79      5.79G     0.0492    0.03666    0.03116         54        640: 1\n",
      "tensor([20.54767], device='cuda:0', grad_fn=<AddBackward0>) tensor(6076.17676, device='cuda:0', grad_fn=<AddBackward0>), tensor(13565.71484, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.48      0.335      0.338      0.228\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      51/79      5.79G    0.04969    0.03678    0.03085         41        640: 1\n",
      "tensor([20.34256], device='cuda:0', grad_fn=<AddBackward0>) tensor(6051.51367, device='cuda:0', grad_fn=<AddBackward0>), tensor(13365.85059, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.529      0.342      0.336      0.225\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      52/79      5.79G     0.0489    0.03641    0.03049         45        640: 1\n",
      "tensor([21.51764], device='cuda:0', grad_fn=<AddBackward0>) tensor(6774.71191, device='cuda:0', grad_fn=<AddBackward0>), tensor(13732.77637, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.517      0.325      0.338      0.225\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      53/79      5.79G    0.04887    0.03652    0.03044         67        640: 1\n",
      "tensor([21.14585], device='cuda:0', grad_fn=<AddBackward0>) tensor(6479.62646, device='cuda:0', grad_fn=<AddBackward0>), tensor(13492.83008, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.52      0.328      0.334      0.226\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      54/79      5.79G    0.04915    0.03669    0.03093         61        640: 1\n",
      "tensor([21.08524], device='cuda:0', grad_fn=<AddBackward0>) tensor(6407.72168, device='cuda:0', grad_fn=<AddBackward0>), tensor(13734.84961, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.494      0.338      0.333      0.225\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      55/79      5.79G    0.04874    0.03571    0.03075         55        640: 1\n",
      "tensor([20.91909], device='cuda:0', grad_fn=<AddBackward0>) tensor(6314.49072, device='cuda:0', grad_fn=<AddBackward0>), tensor(13539.18750, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.53      0.339      0.332      0.222\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      56/79      5.79G    0.04919    0.03619     0.0306         34        640: 1\n",
      "tensor([20.41055], device='cuda:0', grad_fn=<AddBackward0>) tensor(6379.42529, device='cuda:0', grad_fn=<AddBackward0>), tensor(13122.12012, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.492      0.342      0.337      0.226\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      57/79      5.79G     0.0487    0.03655    0.03034         36        640: 1\n",
      "tensor([22.42385], device='cuda:0', grad_fn=<AddBackward0>) tensor(6567.56152, device='cuda:0', grad_fn=<AddBackward0>), tensor(14938.66602, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.495      0.338      0.351      0.237\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      58/79      5.79G    0.04898    0.03687    0.03039         52        640: 1\n",
      "tensor([21.50755], device='cuda:0', grad_fn=<AddBackward0>) tensor(6674.99609, device='cuda:0', grad_fn=<AddBackward0>), tensor(13776.72949, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "      60/79      5.79G    0.04881    0.03628    0.03053         49        640: 1\n",
      "tensor([19.38536], device='cuda:0', grad_fn=<AddBackward0>) tensor(6031.75488, device='cuda:0', grad_fn=<AddBackward0>), tensor(12484.91895, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.497      0.353      0.338      0.228\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      61/79      5.79G    0.04861    0.03591    0.03025         52        640: 1\n",
      "tensor([19.53414], device='cuda:0', grad_fn=<AddBackward0>) tensor(5764.50049, device='cuda:0', grad_fn=<AddBackward0>), tensor(12741.66895, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.522      0.326      0.352      0.239\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      62/79      5.79G    0.04893     0.0365    0.03075         41        640: 1\n",
      "tensor([19.82314], device='cuda:0', grad_fn=<AddBackward0>) tensor(6246.33789, device='cuda:0', grad_fn=<AddBackward0>), tensor(12484.80566, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.517      0.345      0.346      0.235\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      63/79      5.79G    0.04857    0.03602    0.03064         34        640: 1\n",
      "tensor([20.44250], device='cuda:0', grad_fn=<AddBackward0>) tensor(5889.62891, device='cuda:0', grad_fn=<AddBackward0>), tensor(13777.13184, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.536      0.317      0.346       0.23\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      64/79      5.79G    0.04865    0.03553    0.03082         37        640: 1\n",
      "tensor([20.10297], device='cuda:0', grad_fn=<AddBackward0>) tensor(6363.08398, device='cuda:0', grad_fn=<AddBackward0>), tensor(12927.76465, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.493      0.348       0.34      0.228\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      65/79      5.79G    0.04874    0.03603    0.03022         33        640: 1\n",
      "tensor([20.62527], device='cuda:0', grad_fn=<AddBackward0>) tensor(6207.11621, device='cuda:0', grad_fn=<AddBackward0>), tensor(13468.36719, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.541      0.331      0.351      0.233\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      66/79      5.79G    0.04884    0.03624    0.03068         20        640: 1\n",
      "tensor([21.52386], device='cuda:0', grad_fn=<AddBackward0>) tensor(6458.88086, device='cuda:0', grad_fn=<AddBackward0>), tensor(14088.65234, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.515       0.34      0.349      0.234\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      67/79      5.79G     0.0483    0.03654    0.03081         49        640: 1\n",
      "tensor([20.57790], device='cuda:0', grad_fn=<AddBackward0>) tensor(5770.83643, device='cuda:0', grad_fn=<AddBackward0>), tensor(13858.00098, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.535      0.342      0.354      0.237\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      68/79      5.79G    0.04827    0.03639    0.03071         93        640: 1\n",
      "tensor([19.75714], device='cuda:0', grad_fn=<AddBackward0>) tensor(5829.05518, device='cuda:0', grad_fn=<AddBackward0>), tensor(12639.37402, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.522      0.353       0.35      0.237\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      69/79      5.79G    0.04883    0.03624    0.03057         78        640:  "
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "IOPub message rate exceeded.\n",
      "The Jupyter server will temporarily stop sending output\n",
      "to the client in order to avoid crashing it.\n",
      "To change this limit, set the config variable\n",
      "`--ServerApp.iopub_msg_rate_limit`.\n",
      "\n",
      "Current values:\n",
      "ServerApp.iopub_msg_rate_limit=1000.0 (msgs/sec)\n",
      "ServerApp.rate_limit_window=3.0 (secs)\n",
      "\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.528      0.377      0.358      0.243\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      73/79      5.79G    0.04823    0.03636    0.03061         53        640: 1\n",
      "tensor([19.51720], device='cuda:0', grad_fn=<AddBackward0>) tensor(5769.74121, device='cuda:0', grad_fn=<AddBackward0>), tensor(12763.15723, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.525      0.358      0.354      0.239\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      74/79      5.79G     0.0486    0.03675    0.03053         59        640: 1\n",
      "tensor([19.96543], device='cuda:0', grad_fn=<AddBackward0>) tensor(5794.94189, device='cuda:0', grad_fn=<AddBackward0>), tensor(13074.04395, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.509      0.369      0.356      0.242\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      75/79      5.79G    0.04786    0.03557    0.03044         26        640: 1\n",
      "tensor([20.72850], device='cuda:0', grad_fn=<AddBackward0>) tensor(5722.00488, device='cuda:0', grad_fn=<AddBackward0>), tensor(14280.91992, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.508      0.357      0.356      0.243\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      76/79      5.79G    0.04851    0.03621    0.03032         54        640: 1\n",
      "tensor([19.91519], device='cuda:0', grad_fn=<AddBackward0>) tensor(5731.80322, device='cuda:0', grad_fn=<AddBackward0>), tensor(13078.42090, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.49      0.353       0.35      0.238\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      77/79      5.79G    0.04793    0.03573    0.03026         31        640: 1\n",
      "tensor([21.10859], device='cuda:0', grad_fn=<AddBackward0>) tensor(5823.06348, device='cuda:0', grad_fn=<AddBackward0>), tensor(14292.92969, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.526      0.338      0.352      0.239\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      78/79      5.79G     0.0482    0.03632    0.02995         56        640: 1\n",
      "tensor([20.57814], device='cuda:0', grad_fn=<AddBackward0>) tensor(5788.41992, device='cuda:0', grad_fn=<AddBackward0>), tensor(13644.68164, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.546      0.327      0.348      0.235\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      79/79      5.79G      0.048    0.03593    0.03049         45        640: 1\n",
      "tensor([19.47497], device='cuda:0', grad_fn=<AddBackward0>) tensor(5891.44775, device='cuda:0', grad_fn=<AddBackward0>), tensor(12783.65430, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.52      0.332      0.346      0.234\n",
      "\n",
      "80 epochs completed in 1.671 hours.\n",
      "Optimizer stripped from runs/train/k_v_2oldmodels_openimages2/weights/last.pt, 14.6MB\n",
      "Optimizer stripped from runs/train/k_v_2oldmodels_openimages2/weights/best.pt, 14.6MB\n",
      "\n",
      "Validating runs/train/k_v_2oldmodels_openimages2/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.531      0.354      0.361      0.245\n",
      "                   car       1200        287      0.635      0.589      0.561      0.364\n",
      "                   van       1200         29      0.539      0.103      0.198       0.16\n",
      "                 truck       1200         29      0.256      0.241      0.193      0.127\n",
      "                person       1200       2264      0.482       0.32       0.33      0.162\n",
      "               bicycle       1200         54      0.572      0.481      0.486      0.313\n",
      "                  bird       1200        136      0.616      0.649      0.576      0.353\n",
      "                  boat       1200        145      0.651       0.51      0.529      0.268\n",
      "                bottle       1200         31          0          0    0.00167   0.000587\n",
      "                   bus       1200         15       0.52      0.733      0.753      0.626\n",
      "                   cat       1200          1          0          0      0.004     0.0028\n",
      "                 chair       1200         21      0.188      0.333      0.117     0.0549\n",
      "                   dog       1200         42      0.599      0.595      0.618       0.42\n",
      "                 horse       1200         44      0.737      0.682      0.754       0.48\n",
      "                 sheep       1200         10      0.242        0.6      0.308      0.178\n",
      "             billboard       1200          4          1          0    0.00254   0.000508\n",
      "                rabbit       1200         11        0.8      0.364      0.523      0.387\n",
      "                monkey       1200         18      0.634      0.667      0.698      0.519\n",
      "                   pig       1200          6      0.402        0.5      0.513      0.408\n",
      "                   toy       1200         64       0.28     0.0625      0.114     0.0619\n",
      "         traffic light       1200         18          1          0     0.0429     0.0258\n",
      "          traffic sign       1200          4          1          0      0.256      0.225\n",
      "Results saved to \u001b[1mruns/train/k_v_2oldmodels_openimages2\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : k_v_2oldmodels_openimages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/bb996a2ef79644dab98f6c8b6f052f3c\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_f1                    : 0.5230063195813975\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_false_positives       : 19.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5                : 0.48587110494236346\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5:.95            : 0.31291410570403794\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_precision             : 0.572369712707877\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_recall                : 0.48148148148148145\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_support               : 54\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_true_positives        : 26.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_f1                  : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_false_positives     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_mAP@.5              : 0.0025419295558958654\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_mAP@.5:.95          : 0.0005083859111791731\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_precision           : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_recall              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_support             : 4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_true_positives      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_f1                       : 0.6317972924637785\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_false_positives          : 55.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_mAP@.5                   : 0.5760076349287888\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_mAP@.5:.95               : 0.35279294006074\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_precision                : 0.6159153682301831\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_recall                   : 0.6485199548925039\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_support                  : 136\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_true_positives           : 88.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_f1                       : 0.5720697762757193\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_false_positives          : 40.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_mAP@.5                   : 0.5285710071030588\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_mAP@.5:.95               : 0.26772944728958276\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_precision                : 0.6507800146055075\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_recall                   : 0.5103448275862069\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_support                  : 145\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_true_positives           : 74.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_f1                     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_false_positives        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_mAP@.5                 : 0.0016703284494564165\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_mAP@.5:.95             : 0.000586795140603537\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_precision              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_recall                 : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_support                : 31\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_true_positives         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_f1                        : 0.6088339848960208\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_false_positives           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5                    : 0.753188526751253\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5:.95                : 0.6259938913379705\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_precision                 : 0.5204724243763282\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_recall                    : 0.7333333333333333\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_support                   : 15\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_true_positives            : 11.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_f1                        : 0.6108683521507796\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_false_positives           : 97.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5                    : 0.560666716778484\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5:.95                : 0.36355114558543367\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_precision                 : 0.6345970904023338\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_recall                    : 0.5888501742160279\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_support                   : 287\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_true_positives            : 169.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_f1                        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_false_positives           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_mAP@.5                    : 0.0039959839357429725\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_mAP@.5:.95                : 0.002797188755020081\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_precision                 : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_recall                    : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_support                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_true_positives            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_f1                      : 0.2406187385269361\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_false_positives         : 30.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_mAP@.5                  : 0.11704268102606807\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_mAP@.5:.95              : 0.05491117213384054\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_precision               : 0.1882563929505608\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_recall                  : 0.3333333333333333\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_support                 : 21\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_true_positives          : 7.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_f1                        : 0.5969305166072374\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_false_positives           : 17.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_mAP@.5                    : 0.6175318209851571\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_mAP@.5:.95                : 0.4197484676945712\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_precision                 : 0.5986325894327605\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_recall                    : 0.5952380952380952\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_support                   : 42\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_true_positives            : 25.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_f1                      : 0.708203303206174\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_false_positives         : 11.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_mAP@.5                  : 0.7541322323392936\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_mAP@.5:.95              : 0.47996516114661664\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_precision               : 0.7367127441400924\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_recall                  : 0.6818181818181818\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_support                 : 44\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_true_positives          : 30.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [2104]                   : (19.711091995239258, 89.72744750976562)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [160]         : (0.10182492221209152, 0.36063842189034356)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [160]    : (0.04488702389277525, 0.24455280596165674)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [160]       : (0.4107725801057002, 0.8241706234397919)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [160]          : (0.057561136581744055, 0.3766754426082833)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_f1                     : 0.6499876318567874\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_false_positives        : 7.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_mAP@.5                 : 0.6981595675371876\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_mAP@.5:.95             : 0.5186721690672678\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_precision              : 0.6341227975360254\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_recall                 : 0.6666666666666666\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_support                : 18\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_true_positives         : 12.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_f1                     : 0.38443593148216226\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_false_positives        : 779.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5                 : 0.32969259644036575\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5:.95             : 0.16228270026765038\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_precision              : 0.4818451549312951\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_recall                 : 0.3197879858657244\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_support                : 2264\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_true_positives         : 724.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_f1                        : 0.4456379563201078\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_false_positives           : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_mAP@.5                    : 0.5126486180996135\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_mAP@.5:.95                : 0.4081350302632538\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_precision                 : 0.4019376519376519\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_recall                    : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_support                   : 6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_true_positives            : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_f1                     : 0.5002895331158272\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_false_positives        : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_mAP@.5                 : 0.5226252500186209\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_mAP@.5:.95             : 0.3868984744277049\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_precision              : 0.8001044115276326\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_recall                 : 0.3639211223480887\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_support                : 11\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_true_positives         : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_f1                      : 0.3452326698745507\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_false_positives         : 19.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_mAP@.5                  : 0.3076442026442026\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_mAP@.5:.95              : 0.17799286766737146\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_precision               : 0.24233448638511929\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_recall                  : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_support                 : 10\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_true_positives          : 6.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_f1                        : 0.10219951298895207\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_false_positives           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_mAP@.5                    : 0.11430497184048911\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_mAP@.5:.95                : 0.061899621814735864\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_precision                 : 0.28014618980351064\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_recall                    : 0.0625\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_support                   : 64\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_true_positives            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_f1              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_false_positives : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_mAP@.5          : 0.04293557422969188\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_mAP@.5:.95      : 0.025761344537815128\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_precision       : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_recall          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_support         : 18\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_true_positives  : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_f1               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_false_positives  : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_mAP@.5           : 0.25574351804778855\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_mAP@.5:.95       : 0.22547356380274527\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_precision        : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_recall           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_support          : 4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_true_positives   : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [160]          : (0.047855641692876816, 0.09246472269296646)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [160]          : (0.029954541474580765, 0.055902980268001556)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [160]          : (0.03552769497036934, 0.04147909954190254)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_f1                      : 0.2483304882298124\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_false_positives         : 20.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5                  : 0.1925000478124235\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5:.95              : 0.12681652475472055\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_precision               : 0.2556938937214561\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_recall                  : 0.2413793103448276\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_support                 : 29\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_true_positives          : 7.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [160]            : (0.0508703850209713, 0.07776906341314316)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [160]            : (0.028164932504296303, 0.04037657007575035)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [160]            : (0.02394947223365307, 0.026728931814432144)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_f1                        : 0.17359221379006862\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_false_positives           : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5                    : 0.19822412781255033\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5:.95                : 0.1603183255156388\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_precision                 : 0.539203489203489\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_recall                    : 0.10344827586206896\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_support                   : 29\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_true_positives            : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [160]                   : (0.00034750000000000026, 0.07011406844106464)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [160]                   : (0.00034750000000000026, 0.009740139416983522)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [160]                   : (0.00034750000000000026, 0.009740139416983522)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : k_v_2oldmodels_openimages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/yolov5/bb996a2ef79644dab98f6c8b6f052f3c\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_enable          : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_lambda          : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_temperature     : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Old_models          : ['./runs/train/increment_VOC_plain/weights/last.pt', './runs/train/fog_02/weights/last.pt']\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bbox_interval       : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cfg                 : models/yolov5s_openimages.yaml\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.225\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     data                : data/openimages.yaml\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     epochs              : 80\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                : k_v_2oldmodels_openimages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/k_v_2oldmodels_openimages2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weights             : ./runs/train/increment_VOC_plain/weights/last.pt\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (2.27 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     model graph                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Please wait for assets to finish uploading (timeout is 10800 seconds)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Still uploading 8 file(s), remaining 49.17 KB/2.40 MB\n"
     ]
    }
   ],
   "source": [
    "# LwF (Learning without Forgetting) incremental training on OpenImages:\n",
    "# fine-tune from the VOC-incremental checkpoint while distilling from TWO\n",
    "# frozen teacher models (--Old_models). COMET_LOG_PER_CLASS_METRICS=true\n",
    "# makes the Comet logger record per-class P/R/mAP.\n",
    "command = \"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_LwfPro.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_openimages.yaml \\\n",
    "--data data/openimages.yaml \\\n",
    "--epochs 80 \\\n",
    "--weights ./runs/train/increment_VOC_plain/weights/last.pt \\\n",
    "--Lwf_enable \\\n",
    "--Lwf_temperature 1.0 \\\n",
    "--Lwf_lambda 1e-3 \\\n",
    "--Old_models \\\n",
    "        ./runs/train/increment_VOC_plain/weights/last.pt \\\n",
    "        ./runs/train/fog_02/weights/last.pt \\\n",
    "--name k_v_2oldmodels_openimages\n",
    "\"\"\"\n",
    "# IPython substitutes {command} before handing the line to the shell.\n",
    "!{command}\n",
    "# run took ~43 minutes"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "464bc3c7-a1b2-4b7d-b073-25f0714ea4d3",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2f210fbe-76ab-497f-92f1-bd3718f7a1b3",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "c1e4a6fc-314a-4093-a11e-db231382cf6a",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_LwfPro: \u001b[0mweights=./runs/train/increment_VOC_plain/weights/last.pt, cfg=models/yolov5s_openimages.yaml, data=data/openimages.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=80, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=k_v_2oldmodels_openimages, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=True, Lwf_lambda=[0.0005, 0.0005], Lwf_temperature=1.0, Old_models=['./runs/train/increment_VOC_plain/weights/last.pt', './runs/train/fog_02/weights/last.pt']\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2895 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/ff02de26a167432d8de5b06286b5fe54\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1    110583  models.yolo.Detect                      [36, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7116727 parameters, 7116727 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/355 items from runs/train/increment_VOC_plain/weights/last.pt\n",
      "Overriding model.yaml nc=36 with nc=26\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83613  models.yolo.Detect                      [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7089757 parameters, 7089757 gradients, 16.2 GFLOPs\n",
      "\n",
      "Overriding model.yaml nc=36 with nc=8\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35067  models.yolo.Detect                      [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_openimages summary: 217 layers, 7041211 parameters, 7041211 gradients, 16.0 GFLOPs\n",
      "\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 66 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/train.cache... 4200 \u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/val.cache... 1200 imag\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.02 anchors/target, 0.998 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/k_v_2oldmodels_openimages5/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/k_v_2oldmodels_openimages5\u001b[0m\n",
      "Starting training for 80 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/79      3.68G    0.08795     0.0417    0.05474         40        640: 1\n",
      "tensor([14.67160], device='cuda:0', grad_fn=<AddBackward0>) tensor(7307.94727, device='cuda:0', grad_fn=<AddBackward0>), tensor(19797.18359, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.895     0.0682      0.108     0.0521\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/79      5.79G      0.066     0.0393    0.03923         63        640: 1\n",
      "tensor([13.64006], device='cuda:0', grad_fn=<AddBackward0>) tensor(6539.90869, device='cuda:0', grad_fn=<AddBackward0>), tensor(18127.86914, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.573      0.216      0.217      0.124\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/79      5.79G    0.06022    0.03826    0.03632         57        640: 1\n",
      "tensor([13.59597], device='cuda:0', grad_fn=<AddBackward0>) tensor(5806.85498, device='cuda:0', grad_fn=<AddBackward0>), tensor(19379.59961, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.614      0.214      0.234      0.131\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/79      5.79G    0.05774    0.03798    0.03427         42        640: 1\n",
      "tensor([12.59937], device='cuda:0', grad_fn=<AddBackward0>) tensor(5204.52197, device='cuda:0', grad_fn=<AddBackward0>), tensor(18137.44531, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.57      0.243      0.258      0.153\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/79      5.79G    0.05587    0.03666    0.03331         36        640: 1\n",
      "tensor([12.22401], device='cuda:0', grad_fn=<AddBackward0>) tensor(5849.38477, device='cuda:0', grad_fn=<AddBackward0>), tensor(16408.05078, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.575      0.256      0.265      0.162\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/79      5.79G    0.05468     0.0376      0.032         39        640: 1\n",
      "tensor([12.83266], device='cuda:0', grad_fn=<AddBackward0>) tensor(6997.78174, device='cuda:0', grad_fn=<AddBackward0>), tensor(16796.60156, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.523      0.254      0.273      0.171\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/79      5.79G    0.05413    0.03704     0.0313         68        640: 1\n",
      "tensor([12.64499], device='cuda:0', grad_fn=<AddBackward0>) tensor(7173.27344, device='cuda:0', grad_fn=<AddBackward0>), tensor(15333.38867, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.591      0.281      0.287      0.183\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/79      5.79G    0.05397    0.03697    0.03168         31        640: 1\n",
      "tensor([12.19321], device='cuda:0', grad_fn=<AddBackward0>) tensor(7660.17432, device='cuda:0', grad_fn=<AddBackward0>), tensor(14900.86816, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.456      0.308      0.298      0.184\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/79      5.79G    0.05346    0.03698    0.03082         35        640: 1\n",
      "tensor([12.77998], device='cuda:0', grad_fn=<AddBackward0>) tensor(7315.43604, device='cuda:0', grad_fn=<AddBackward0>), tensor(16495.04297, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.444      0.332      0.287      0.183\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/79      5.79G    0.05294    0.03689    0.03047         42        640: 1\n",
      "tensor([12.62621], device='cuda:0', grad_fn=<AddBackward0>) tensor(7005.51270, device='cuda:0', grad_fn=<AddBackward0>), tensor(16262.00293, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.46      0.328      0.309      0.197\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/79      5.79G     0.0527    0.03641     0.0299         38        640: 1\n",
      "tensor([12.39494], device='cuda:0', grad_fn=<AddBackward0>) tensor(6403.33252, device='cuda:0', grad_fn=<AddBackward0>), tensor(16660.51562, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.46      0.333      0.297      0.191\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/79      5.79G    0.05208    0.03702    0.02933         59        640: 1\n",
      "tensor([12.92537], device='cuda:0', grad_fn=<AddBackward0>) tensor(7606.80078, device='cuda:0', grad_fn=<AddBackward0>), tensor(15982.36426, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.494      0.297      0.303      0.201\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/79      5.79G    0.05171    0.03665    0.03028         46        640: 1\n",
      "tensor([12.60508], device='cuda:0', grad_fn=<AddBackward0>) tensor(7074.55322, device='cuda:0', grad_fn=<AddBackward0>), tensor(16266.58203, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.47      0.331      0.307      0.202\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/79      5.79G    0.05161    0.03642    0.02906         47        640: 1\n",
      "tensor([12.86347], device='cuda:0', grad_fn=<AddBackward0>) tensor(7719.43066, device='cuda:0', grad_fn=<AddBackward0>), tensor(15936.36328, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.476      0.315      0.299      0.188\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/79      5.79G    0.05124    0.03595    0.02918         32        640: 1\n",
      "tensor([11.93586], device='cuda:0', grad_fn=<AddBackward0>) tensor(7631.53613, device='cuda:0', grad_fn=<AddBackward0>), tensor(14865.47168, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.505      0.327      0.324      0.208\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/79      5.79G    0.05083    0.03679    0.02862         48        640: 1\n",
      "tensor([12.00949], device='cuda:0', grad_fn=<AddBackward0>) tensor(6741.98779, device='cuda:0', grad_fn=<AddBackward0>), tensor(15405.08789, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.477      0.344      0.318      0.204\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/79      5.79G    0.05099    0.03599    0.02849         43        640: 1\n",
      "tensor([11.32947], device='cuda:0', grad_fn=<AddBackward0>) tensor(6692.46387, device='cuda:0', grad_fn=<AddBackward0>), tensor(14059.69434, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.457      0.345      0.318       0.21\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/79      5.79G    0.05109    0.03664     0.0285         64        640: 1\n",
      "tensor([12.12273], device='cuda:0', grad_fn=<AddBackward0>) tensor(7159.55566, device='cuda:0', grad_fn=<AddBackward0>), tensor(15135.31543, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.461      0.353      0.331      0.215\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/79      5.79G    0.05079    0.03647     0.0286         61        640: 1\n",
      "tensor([11.69432], device='cuda:0', grad_fn=<AddBackward0>) tensor(7108.02295, device='cuda:0', grad_fn=<AddBackward0>), tensor(14205.51660, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233        0.5      0.358      0.333      0.221\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/79      5.79G    0.05037    0.03681    0.02834         29        640: 1\n",
      "tensor([11.78952], device='cuda:0', grad_fn=<AddBackward0>) tensor(7644.94580, device='cuda:0', grad_fn=<AddBackward0>), tensor(14193.83887, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.466      0.351       0.33      0.215\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/79      5.79G    0.04995    0.03529    0.02746         39        640: 1\n",
      "tensor([11.38879], device='cuda:0', grad_fn=<AddBackward0>) tensor(6912.22900, device='cuda:0', grad_fn=<AddBackward0>), tensor(13918.98242, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.516      0.324      0.325      0.217\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/79      5.79G    0.04945    0.03602    0.02792         40        640: 1\n",
      "tensor([12.30295], device='cuda:0', grad_fn=<AddBackward0>) tensor(7968.79150, device='cuda:0', grad_fn=<AddBackward0>), tensor(14976.44531, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.486      0.336      0.337      0.223\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/79      5.79G    0.04963    0.03588    0.02808         44        640: 1\n",
      "tensor([11.55285], device='cuda:0', grad_fn=<AddBackward0>) tensor(6852.16699, device='cuda:0', grad_fn=<AddBackward0>), tensor(14186.93066, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.454       0.37      0.334      0.218\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.532      0.306      0.333      0.219\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/79      5.79G    0.04917    0.03567    0.02743         72        640: 1\n",
      "tensor([12.31969], device='cuda:0', grad_fn=<AddBackward0>) tensor(7403.04785, device='cuda:0', grad_fn=<AddBackward0>), tensor(15199.50586, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.487      0.352      0.347      0.226\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/79      5.79G    0.04905    0.03608     0.0273         41        640: 1\n",
      "tensor([12.26025], device='cuda:0', grad_fn=<AddBackward0>) tensor(7878.55713, device='cuda:0', grad_fn=<AddBackward0>), tensor(15081.47559, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.508      0.354      0.351      0.238\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/79      5.79G    0.04936    0.03621    0.02757         33        640: 1\n",
      "tensor([12.06292], device='cuda:0', grad_fn=<AddBackward0>) tensor(7129.60645, device='cuda:0', grad_fn=<AddBackward0>), tensor(15112.15430, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.485      0.355      0.341      0.223\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/79      5.79G      0.049     0.0357    0.02663         30        640: 1\n",
      "tensor([11.92419], device='cuda:0', grad_fn=<AddBackward0>) tensor(7382.70898, device='cuda:0', grad_fn=<AddBackward0>), tensor(15171.41211, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.533      0.356      0.347      0.233\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/79      5.79G    0.04926    0.03548    0.02691         30        640: 1\n",
      "tensor([11.76292], device='cuda:0', grad_fn=<AddBackward0>) tensor(7475.29492, device='cuda:0', grad_fn=<AddBackward0>), tensor(14543.77832, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.53      0.372      0.373      0.246\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/79      5.79G    0.04855    0.03542    0.02679         43        640: 1\n",
      "tensor([11.67052], device='cuda:0', grad_fn=<AddBackward0>) tensor(7026.65723, device='cuda:0', grad_fn=<AddBackward0>), tensor(14517.68262, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.544      0.361      0.361      0.246\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/79      5.79G    0.04915    0.03549    0.02667         62        640: 1\n",
      "tensor([12.03636], device='cuda:0', grad_fn=<AddBackward0>) tensor(7112.77002, device='cuda:0', grad_fn=<AddBackward0>), tensor(14797.03711, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.535      0.365      0.364       0.24\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/79      5.79G    0.04871    0.03538    0.02665         40        640: 1\n",
      "tensor([11.97042], device='cuda:0', grad_fn=<AddBackward0>) tensor(7378.77051, device='cuda:0', grad_fn=<AddBackward0>), tensor(14950.13379, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.499      0.379      0.359      0.242\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/79      5.79G    0.04844    0.03555    0.02623         37        640: 1\n",
      "tensor([12.18796], device='cuda:0', grad_fn=<AddBackward0>) tensor(7678.96924, device='cuda:0', grad_fn=<AddBackward0>), tensor(15071.03320, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.486      0.371      0.341      0.222\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/79      5.79G    0.04784    0.03494    0.02658         45        640: 1\n",
      "tensor([11.85162], device='cuda:0', grad_fn=<AddBackward0>) tensor(7009.05957, device='cuda:0', grad_fn=<AddBackward0>), tensor(14547.00195, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.533       0.35      0.345      0.232\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/79      5.79G    0.04857    0.03588    0.02647         70        640: 1\n",
      "tensor([12.01505], device='cuda:0', grad_fn=<AddBackward0>) tensor(7055.32178, device='cuda:0', grad_fn=<AddBackward0>), tensor(15151.77441, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.532      0.365      0.368      0.243\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/79      5.79G    0.04811    0.03523    0.02625         34        640: 1\n",
      "tensor([11.36333], device='cuda:0', grad_fn=<AddBackward0>) tensor(7025.44092, device='cuda:0', grad_fn=<AddBackward0>), tensor(14095.42969, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.523      0.375      0.373       0.25\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/79      5.79G    0.04763    0.03488    0.02613         52        640: 1\n",
      "tensor([11.31150], device='cuda:0', grad_fn=<AddBackward0>) tensor(7081.71191, device='cuda:0', grad_fn=<AddBackward0>), tensor(13746.68457, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.564      0.368      0.369      0.249\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/79      5.79G    0.04764    0.03513    0.02608         46        640: 1\n",
      "tensor([11.27647], device='cuda:0', grad_fn=<AddBackward0>) tensor(7043.00537, device='cuda:0', grad_fn=<AddBackward0>), tensor(13860.59961, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.523      0.359      0.357      0.237\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/79      5.79G    0.04737     0.0347    0.02574         42        640: 1\n",
      "tensor([11.72984], device='cuda:0', grad_fn=<AddBackward0>) tensor(7036.72754, device='cuda:0', grad_fn=<AddBackward0>), tensor(14552.59766, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.561      0.368      0.376      0.253\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/79      5.79G    0.04773      0.035    0.02604         56        640: 1\n",
      "tensor([10.70049], device='cuda:0', grad_fn=<AddBackward0>) tensor(6398.88525, device='cuda:0', grad_fn=<AddBackward0>), tensor(13116.19141, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.504      0.386      0.359      0.242\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/79      5.79G    0.04763    0.03505    0.02602         47        640: 1\n",
      "tensor([12.25820], device='cuda:0', grad_fn=<AddBackward0>) tensor(7761.84082, device='cuda:0', grad_fn=<AddBackward0>), tensor(14557.39258, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.508      0.364      0.368      0.249\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/79      5.79G    0.04749    0.03528    0.02582         30        640: 1\n",
      "tensor([11.29146], device='cuda:0', grad_fn=<AddBackward0>) tensor(7139.72656, device='cuda:0', grad_fn=<AddBackward0>), tensor(13982.81934, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.559      0.356      0.374      0.248\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/79      5.79G    0.04718    0.03498    0.02538         31        640: 1\n",
      "tensor([11.17238], device='cuda:0', grad_fn=<AddBackward0>) tensor(7291.67334, device='cuda:0', grad_fn=<AddBackward0>), tensor(13806.32422, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.563      0.365      0.378      0.259\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/79      5.79G    0.04771    0.03553    0.02527         16        640: 1\n",
      "tensor([11.79544], device='cuda:0', grad_fn=<AddBackward0>) tensor(7186.95361, device='cuda:0', grad_fn=<AddBackward0>), tensor(15191.56445, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.613      0.354      0.377      0.259\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/79      5.79G    0.04728    0.03546    0.02555         39        640: 1\n",
      "tensor([11.61216], device='cuda:0', grad_fn=<AddBackward0>) tensor(6469.73096, device='cuda:0', grad_fn=<AddBackward0>), tensor(15087.16504, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.516      0.353      0.363      0.248\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/79      5.79G    0.04659    0.03501    0.02536         45        640: 1\n",
      "tensor([11.18947], device='cuda:0', grad_fn=<AddBackward0>) tensor(6343.09277, device='cuda:0', grad_fn=<AddBackward0>), tensor(14499.95117, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.536      0.371      0.359      0.244\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/79      5.79G    0.04674    0.03533    0.02602         50        640: 1\n",
      "tensor([11.48455], device='cuda:0', grad_fn=<AddBackward0>) tensor(6697.58545, device='cuda:0', grad_fn=<AddBackward0>), tensor(14346.72266, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.622      0.344      0.375      0.254\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/79      5.79G    0.04709     0.0348    0.02501         28        640: 1\n",
      "tensor([11.78681], device='cuda:0', grad_fn=<AddBackward0>) tensor(7036.13672, device='cuda:0', grad_fn=<AddBackward0>), tensor(15044.29199, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.636      0.359      0.375      0.255\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/79      5.79G    0.04634    0.03548    0.02559         36        640: 1\n",
      "tensor([11.19217], device='cuda:0', grad_fn=<AddBackward0>) tensor(6728.02393, device='cuda:0', grad_fn=<AddBackward0>), tensor(14135.28613, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.604      0.359      0.379      0.261\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/79      5.79G    0.04669    0.03488    0.02548         66        640: 1\n",
      "tensor([11.33947], device='cuda:0', grad_fn=<AddBackward0>) tensor(6863.66992, device='cuda:0', grad_fn=<AddBackward0>), tensor(13845.29883, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.535      0.353      0.363      0.247\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      50/79      5.79G    0.04621    0.03497    0.02551         54        640: 1\n",
      "tensor([10.76046], device='cuda:0', grad_fn=<AddBackward0>) tensor(6206.10596, device='cuda:0', grad_fn=<AddBackward0>), tensor(13618.13867, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.536      0.353      0.367      0.252\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      51/79      5.79G    0.04672     0.0351    0.02509         41        640: 1\n",
      "tensor([10.69867], device='cuda:0', grad_fn=<AddBackward0>) tensor(6187.36816, device='cuda:0', grad_fn=<AddBackward0>), tensor(13493.58105, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.589      0.337      0.371      0.248\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      52/79      5.79G    0.04589    0.03467    0.02483         45        640: 1\n",
      "tensor([11.25869], device='cuda:0', grad_fn=<AddBackward0>) tensor(6819.34424, device='cuda:0', grad_fn=<AddBackward0>), tensor(13861.92285, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.546      0.362      0.364      0.244\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      53/79      5.79G    0.04586     0.0348    0.02475         67        640: 1\n",
      "tensor([11.13651], device='cuda:0', grad_fn=<AddBackward0>) tensor(6583.41943, device='cuda:0', grad_fn=<AddBackward0>), tensor(13583.69824, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.559      0.377      0.381      0.261\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      54/79      5.79G    0.04605    0.03497     0.0252         61        640: 1\n",
      "tensor([11.08466], device='cuda:0', grad_fn=<AddBackward0>) tensor(6568.05664, device='cuda:0', grad_fn=<AddBackward0>), tensor(13850.83008, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.553      0.349      0.376      0.257\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      55/79      5.79G    0.04576    0.03394    0.02489         55        640: 1\n",
      "tensor([11.09199], device='cuda:0', grad_fn=<AddBackward0>) tensor(6542.68359, device='cuda:0', grad_fn=<AddBackward0>), tensor(13677.31934, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.556      0.366      0.369      0.249\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      56/79      5.79G    0.04618    0.03449     0.0248         34        640: 1\n",
      "tensor([10.68697], device='cuda:0', grad_fn=<AddBackward0>) tensor(6472.51855, device='cuda:0', grad_fn=<AddBackward0>), tensor(13239.63379, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.637      0.345      0.385      0.264\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      57/79      5.79G    0.04573    0.03474    0.02458         36        640: 1\n",
      "tensor([11.71408], device='cuda:0', grad_fn=<AddBackward0>) tensor(6698.89697, device='cuda:0', grad_fn=<AddBackward0>), tensor(15030.55859, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.621       0.36      0.386      0.264\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      58/79      5.79G    0.04574    0.03507    0.02463         52        640: 1\n",
      "tensor([11.34476], device='cuda:0', grad_fn=<AddBackward0>) tensor(6854.42969, device='cuda:0', grad_fn=<AddBackward0>), tensor(13912.23828, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.596      0.354      0.383      0.264\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      59/79      5.79G    0.04551    0.03412    0.02434         31        640: 1\n",
      "tensor([11.70835], device='cuda:0', grad_fn=<AddBackward0>) tensor(6804.60254, device='cuda:0', grad_fn=<AddBackward0>), tensor(15149.82910, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.597      0.372      0.371      0.254\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      60/79      5.79G    0.04558    0.03444    0.02469         49        640: 1\n",
      "tensor([10.20083], device='cuda:0', grad_fn=<AddBackward0>) tensor(6222.22021, device='cuda:0', grad_fn=<AddBackward0>), tensor(12550.85059, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.614      0.351      0.367      0.251\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      61/79      5.79G     0.0455     0.0341    0.02451         52        640: 1\n",
      "tensor([10.33280], device='cuda:0', grad_fn=<AddBackward0>) tensor(5890.53027, device='cuda:0', grad_fn=<AddBackward0>), tensor(12847.68555, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.616      0.345      0.381       0.26\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      62/79      5.79G    0.04569     0.0346    0.02477         41        640: 1\n",
      "tensor([10.52053], device='cuda:0', grad_fn=<AddBackward0>) tensor(6419.48975, device='cuda:0', grad_fn=<AddBackward0>), tensor(12618.65430, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.55      0.359      0.384      0.261\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      63/79      5.79G     0.0453    0.03408    0.02462         34        640: 1\n",
      "tensor([10.68158], device='cuda:0', grad_fn=<AddBackward0>) tensor(6046.80029, device='cuda:0', grad_fn=<AddBackward0>), tensor(13874.34570, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.589      0.375      0.376      0.259\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      64/79      5.79G    0.04528    0.03371    0.02486         37        640: 1\n",
      "tensor([10.56200], device='cuda:0', grad_fn=<AddBackward0>) tensor(6549.98682, device='cuda:0', grad_fn=<AddBackward0>), tensor(13059.22363, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.615      0.376      0.381      0.258\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      65/79      5.79G    0.04544    0.03414    0.02422         33        640: 1\n",
      "tensor([10.89680], device='cuda:0', grad_fn=<AddBackward0>) tensor(6379.06543, device='cuda:0', grad_fn=<AddBackward0>), tensor(13613.28613, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.535      0.391      0.384      0.264\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      66/79      5.79G    0.04544    0.03432    0.02464         20        640: 1\n",
      "tensor([11.30803], device='cuda:0', grad_fn=<AddBackward0>) tensor(6671.96094, device='cuda:0', grad_fn=<AddBackward0>), tensor(14189.93945, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.621      0.361      0.377      0.261\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      67/79      5.79G    0.04496    0.03459    0.02476         49        640: 1\n",
      "tensor([10.83581], device='cuda:0', grad_fn=<AddBackward0>) tensor(5926.68408, device='cuda:0', grad_fn=<AddBackward0>), tensor(13999.65625, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.606      0.357      0.387      0.266\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      68/79      5.79G    0.04475    0.03441    0.02459         93        640: 1\n",
      "tensor([10.54932], device='cuda:0', grad_fn=<AddBackward0>) tensor(6008.17285, device='cuda:0', grad_fn=<AddBackward0>), tensor(12762.66113, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233       0.55      0.367      0.388      0.265\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      69/79      5.79G    0.04484    0.03435    0.02419         32        640: 1\n",
      "tensor([10.65046], device='cuda:0', grad_fn=<AddBackward0>) tensor(6137.38428, device='cuda:0', grad_fn=<AddBackward0>), tensor(13625.97949, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.569      0.368      0.386      0.265\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      70/79      5.79G    0.04507    0.03421    0.02469         47        640: 1\n",
      "tensor([10.64936], device='cuda:0', grad_fn=<AddBackward0>) tensor(5884.52197, device='cuda:0', grad_fn=<AddBackward0>), tensor(13578.51562, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.596      0.364      0.387      0.265\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      71/79      5.79G    0.04474    0.03476    0.02411         43        640: 1\n",
      "tensor([10.86871], device='cuda:0', grad_fn=<AddBackward0>) tensor(5833.64062, device='cuda:0', grad_fn=<AddBackward0>), tensor(14423.53516, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.608      0.366      0.381      0.261\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      72/79      5.79G    0.04489    0.03383    0.02426         37        640: 1\n",
      "tensor([10.57979], device='cuda:0', grad_fn=<AddBackward0>) tensor(6272.71924, device='cuda:0', grad_fn=<AddBackward0>), tensor(13223.65820, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.614      0.353      0.383      0.266\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      73/79      5.79G    0.04475    0.03427    0.02439         53        640: 1\n",
      "tensor([10.32949], device='cuda:0', grad_fn=<AddBackward0>) tensor(5951.19043, device='cuda:0', grad_fn=<AddBackward0>), tensor(12886.09961, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.608       0.36      0.386      0.265\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      74/79      5.79G    0.04492    0.03471    0.02434         59        640: 1\n",
      "tensor([10.62280], device='cuda:0', grad_fn=<AddBackward0>) tensor(6012.58594, device='cuda:0', grad_fn=<AddBackward0>), tensor(13234.73340, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.618      0.353      0.385      0.267\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      75/79      5.79G    0.04422    0.03354    0.02427         26        640: 1\n",
      "tensor([10.76006], device='cuda:0', grad_fn=<AddBackward0>) tensor(5874.48926, device='cuda:0', grad_fn=<AddBackward0>), tensor(14393.65918, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.602      0.357      0.385      0.268\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      76/79      5.79G    0.04482    0.03414    0.02412         54        640: 1\n",
      "tensor([10.55576], device='cuda:0', grad_fn=<AddBackward0>) tensor(6009.92090, device='cuda:0', grad_fn=<AddBackward0>), tensor(13180.98145, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.606      0.363      0.384      0.264\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      77/79      5.79G    0.04423    0.03367    0.02405         31        640: 1\n",
      "tensor([11.10316], device='cuda:0', grad_fn=<AddBackward0>) tensor(6004.27588, device='cuda:0', grad_fn=<AddBackward0>), tensor(14372.88379, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.622      0.343      0.385      0.265\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      78/79      5.79G    0.04447    0.03416    0.02369         56        640: 1\n",
      "tensor([10.89891], device='cuda:0', grad_fn=<AddBackward0>) tensor(5993.19775, device='cuda:0', grad_fn=<AddBackward0>), tensor(13778.16309, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.616      0.348      0.382      0.263\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      79/79      5.79G    0.04421    0.03382    0.02417         45        640: 1\n",
      "tensor([10.22584], device='cuda:0', grad_fn=<AddBackward0>) tensor(6105.43164, device='cuda:0', grad_fn=<AddBackward0>), tensor(12874.53223, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.609      0.354      0.383      0.264\n",
      "\n",
      "80 epochs completed in 1.701 hours.\n",
      "Optimizer stripped from runs/train/k_v_2oldmodels_openimages5/weights/last.pt, 14.6MB\n",
      "Optimizer stripped from runs/train/k_v_2oldmodels_openimages5/weights/best.pt, 14.6MB\n",
      "\n",
      "Validating runs/train/k_v_2oldmodels_openimages5/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1200       3233      0.604      0.357      0.385      0.268\n",
      "                   car       1200        287      0.654      0.568      0.571      0.373\n",
      "                   van       1200         29      0.739      0.197      0.359      0.296\n",
      "                 truck       1200         29      0.279      0.161      0.231      0.155\n",
      "                person       1200       2264      0.455      0.348      0.335      0.166\n",
      "               bicycle       1200         54      0.591      0.463      0.493      0.326\n",
      "                  bird       1200        136        0.6       0.64      0.549      0.334\n",
      "                  boat       1200        145      0.666      0.468      0.535      0.285\n",
      "                bottle       1200         31          0          0    0.00227   0.000931\n",
      "                   bus       1200         15      0.694      0.733      0.789      0.669\n",
      "                   cat       1200          1          1          0    0.00196    0.00137\n",
      "                 chair       1200         21      0.104      0.143     0.0953     0.0498\n",
      "                   dog       1200         42      0.693      0.592      0.632      0.449\n",
      "                 horse       1200         44      0.793      0.705      0.741      0.494\n",
      "                 sheep       1200         10      0.275        0.5      0.444      0.259\n",
      "             billboard       1200          4          1          0    0.00218   0.000437\n",
      "                rabbit       1200         11      0.785      0.455      0.552      0.448\n",
      "                monkey       1200         18      0.603      0.722      0.723      0.552\n",
      "                   pig       1200          6      0.433      0.667      0.579      0.443\n",
      "                   toy       1200         64      0.318      0.131      0.143       0.08\n",
      "         traffic light       1200         18          1          0     0.0415     0.0188\n",
      "          traffic sign       1200          4          1          0      0.261      0.232\n",
      "Results saved to \u001b[1mruns/train/k_v_2oldmodels_openimages5\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : k_v_2oldmodels_openimages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/ff02de26a167432d8de5b06286b5fe54\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_f1                    : 0.5191508109193309\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_false_positives       : 17.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5                : 0.49324684559717435\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_mAP@.5:.95            : 0.32601004034613595\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_precision             : 0.5908611141169281\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_recall                : 0.46296296296296297\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_support               : 54\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bicycle_true_positives        : 25.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_f1                  : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_false_positives     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_mAP@.5              : 0.0021828094024355705\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_mAP@.5:.95          : 0.0004365618804871141\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_precision           : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_recall              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_support             : 4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     billboard_true_positives      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_f1                       : 0.6191171313685542\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_false_positives          : 58.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_mAP@.5                   : 0.5491996462273279\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_mAP@.5:.95               : 0.33434373924390626\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_precision                : 0.5998123425601266\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_recall                   : 0.6397058823529411\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_support                  : 136\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bird_true_positives           : 87.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_f1                       : 0.5499815522575356\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_false_positives          : 34.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_mAP@.5                   : 0.5346216897914731\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_mAP@.5:.95               : 0.2847842036383025\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_precision                : 0.666315476044275\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_recall                   : 0.46823164994079536\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_support                  : 145\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     boat_true_positives           : 68.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_f1                     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_false_positives        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_mAP@.5                 : 0.0022693134089657005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_mAP@.5:.95             : 0.000931485137543105\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_precision              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_recall                 : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_support                : 31\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bottle_true_positives         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_f1                        : 0.7130244281453042\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_false_positives           : 5.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5                    : 0.7891561278339284\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_mAP@.5:.95                : 0.6685051978486122\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_precision                 : 0.6938100784608138\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_recall                    : 0.7333333333333333\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_support                   : 15\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bus_true_positives            : 11.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_f1                        : 0.6077277097051625\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_false_positives           : 86.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5                    : 0.5710072663690415\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5:.95                : 0.37345662345712144\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_precision                 : 0.653504508524589\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_recall                    : 0.5679442508710801\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_support                   : 287\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_true_positives            : 163.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_f1                        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_false_positives           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_mAP@.5                    : 0.0019586614173228347\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_mAP@.5:.95                : 0.0013710629921259844\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_precision                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_recall                    : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_support                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cat_true_positives            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_f1                      : 0.12046726438026806\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_false_positives         : 26.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_mAP@.5                  : 0.09534425350057882\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_mAP@.5:.95              : 0.04975359657552504\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_precision               : 0.10414474680542035\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_recall                  : 0.14285714285714285\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_support                 : 21\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     chair_true_positives          : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_f1                        : 0.6385752864796914\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_false_positives           : 11.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_mAP@.5                    : 0.6315079161161754\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_mAP@.5:.95                : 0.448635337005468\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_precision                 : 0.693225032113921\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_recall                    : 0.5919124252457585\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_support                   : 42\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     dog_true_positives            : 25.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_f1                      : 0.7461577488172751\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_false_positives         : 8.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_mAP@.5                  : 0.7406677147195481\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_mAP@.5:.95              : 0.49367273165473097\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_precision               : 0.7929940479762544\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_recall                  : 0.7045454545454546\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_support                 : 44\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     horse_true_positives          : 31.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [2104]                   : (10.555757522583008, 46.7541389465332)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [160]         : (0.10796186182296315, 0.3881928397363896)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [160]    : (0.052054444447205424, 0.2682219104200585)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [160]       : (0.44382457585043494, 0.8953283925052866)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [160]          : (0.06820633268954546, 0.3908765220327964)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_f1                     : 0.6569514052656392\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_false_positives        : 9.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_mAP@.5                 : 0.722636500354684\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_mAP@.5:.95             : 0.5521218546470569\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_precision              : 0.6025004415248318\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_recall                 : 0.7222222222222222\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_support                : 18\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     monkey_true_positives         : 13.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_f1                     : 0.3944119970717414\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_false_positives        : 943.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5                 : 0.33513221738813787\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5:.95             : 0.16605647607022891\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_precision              : 0.4553504162265951\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_recall                 : 0.34785890130413094\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_support                : 2264\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_true_positives         : 788.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_f1                        : 0.524694496510519\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_false_positives           : 5.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_mAP@.5                    : 0.5787606177606179\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_mAP@.5:.95                : 0.4434889382239383\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_precision                 : 0.43257424090757424\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_recall                    : 0.6666666666666666\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_support                   : 6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     pig_true_positives            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_f1                     : 0.5758558873767194\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_false_positives        : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_mAP@.5                 : 0.5519919081619643\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_mAP@.5:.95             : 0.44783369842990106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_precision              : 0.7854896965326413\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_recall                 : 0.45454545454545453\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_support                : 11\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rabbit_true_positives         : 5.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_f1                      : 0.35456216479673863\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_false_positives         : 13.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_mAP@.5                  : 0.44426582761250955\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_mAP@.5:.95              : 0.2590223514809936\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_precision               : 0.2746679427965979\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_recall                  : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_support                 : 10\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sheep_true_positives          : 5.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_f1                        : 0.18557752510136372\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_false_positives           : 17.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_mAP@.5                    : 0.14298872357132958\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_mAP@.5:.95                : 0.07997228671749299\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_precision                 : 0.31766280450490975\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_recall                    : 0.13107577149517938\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_support                   : 64\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     toy_true_positives            : 8.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_f1              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_false_positives : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_mAP@.5          : 0.04149069767441859\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_mAP@.5:.95      : 0.018795731874145002\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_precision       : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_recall          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_support         : 18\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic light_true_positives  : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_f1               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_false_positives  : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_mAP@.5           : 0.26059186110982446\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_mAP@.5:.95       : 0.23170975959074896\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_precision        : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_recall           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_support          : 4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     traffic sign_true_positives   : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [160]          : (0.04421229287981987, 0.08795096725225449)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [160]          : (0.02368510700762272, 0.05473887547850609)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [160]          : (0.03353952243924141, 0.04170302301645279)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_f1                      : 0.2041176803627508\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_false_positives         : 13.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5                  : 0.23073857124123368\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5:.95              : 0.155112993411125\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_precision               : 0.27929468456144235\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_recall                  : 0.16082802931239173\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_support                 : 29\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_true_positives          : 5.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [160]            : (0.05034951865673065, 0.07128961384296417)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [160]            : (0.0250974353402853, 0.037728097289800644)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [160]            : (0.023799344897270203, 0.025914594531059265)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_f1                        : 0.310790678863785\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_false_positives           : 2.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5                    : 0.35914184824903506\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5:.95                : 0.2955962719823514\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_precision                 : 0.7394788968863043\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_recall                    : 0.19673824526953518\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_support                   : 29\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_true_positives            : 6.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [160]                   : (0.00034750000000000026, 0.07011406844106464)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [160]                   : (0.00034750000000000026, 0.009740139416983522)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [160]                   : (0.00034750000000000026, 0.009740139416983522)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : k_v_2oldmodels_openimages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/yolov5/ff02de26a167432d8de5b06286b5fe54\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_enable          : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_lambda          : [0.0005, 0.0005]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_temperature     : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Old_models          : ['./runs/train/increment_VOC_plain/weights/last.pt', './runs/train/fog_02/weights/last.pt']\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bbox_interval       : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cfg                 : models/yolov5s_openimages.yaml\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.225\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     data                : data/openimages.yaml\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     epochs              : 80\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                : k_v_2oldmodels_openimages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/k_v_2oldmodels_openimages5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weights             : ./runs/train/increment_VOC_plain/weights/last.pt\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (2.30 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     model graph                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Please wait for assets to finish uploading (timeout is 10800 seconds)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m All assets have been sent, waiting for delivery confirmation\n"
     ]
    }
   ],
   "source": [
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_LwfPro.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_openimages.yaml \\\n",
    "--data data/openimages.yaml \\\n",
    "--epochs 80 \\\n",
    "--weights ./runs/train/increment_VOC_plain/weights/last.pt \\\n",
    "--Lwf_enable \\\n",
    "--Lwf_temperature 1.0 \\\n",
    "--Lwf_lambda 5e-4 5e-4 \\\n",
    "--Old_models \\\n",
    "        ./runs/train/increment_VOC_plain/weights/last.pt \\\n",
    "        ./runs/train/fog_02/weights/last.pt \\\n",
    "--name k_v_2oldmodels_openimages \\\n",
    "\n",
    "\"\"\"\n",
    "!{command}\n",
    "#43分钟"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "19e42be4-f64f-4fbe-ac9b-a7f4ffd3a923",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/openimages.yaml, weights=['runs/train/k_v_2oldmodels_openimages5/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/openimages/labels/test.cache... 600 ima\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all        600       1621      0.652      0.306      0.376      0.248\n",
      "                   car        600        113      0.496       0.41      0.418      0.284\n",
      "                   van        600          6          0          0     0.0357     0.0205\n",
      "                 truck        600         17          1      0.269        0.6      0.473\n",
      "                person        600       1131      0.522      0.295      0.343      0.171\n",
      "               bicycle        600         43      0.691      0.372      0.494      0.302\n",
      "                  bird        600         61      0.576      0.492      0.514      0.365\n",
      "                  boat        600         82      0.647      0.336      0.492      0.262\n",
      "                bottle        600          1          1          0   0.000739    0.00041\n",
      "                   bus        600          3      0.252      0.333      0.484      0.387\n",
      "                   cat        600          5          1          0      0.366      0.152\n",
      "                 chair        600         12      0.357      0.333      0.193      0.114\n",
      "                   dog        600         25      0.704       0.72      0.678      0.486\n",
      "                 horse        600         37      0.762      0.779      0.766      0.504\n",
      "                 sheep        600          8      0.521      0.625      0.613      0.449\n",
      "                 train        600          2          1          0    0.00843    0.00321\n",
      "             billboard        600          3          1          0     0.0881     0.0673\n",
      "                rabbit        600          1          0          0     0.0829     0.0746\n",
      "                monkey        600         16      0.723      0.815      0.725      0.481\n",
      "                   pig        600          7      0.506      0.857       0.62      0.451\n",
      "                   toy        600         42      0.586     0.0952      0.188      0.085\n",
      "         traffic light        600          5          1          0     0.0615     0.0308\n",
      "          traffic sign        600          1          1          0      0.497      0.298\n",
      "Speed: 0.1ms pre-process, 3.1ms inference, 1.2ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp251\u001b[0m\n",
      "openimages\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_VOC.yaml, weights=['runs/train/k_v_2oldmodels_openimages5/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 image\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032       0.54      0.534      0.563      0.344\n",
      "                   car       4952       1201      0.587      0.898      0.847      0.594\n",
      "                person       4952       4528      0.419      0.697       0.56      0.319\n",
      "             aeroplane       4952        285      0.551       0.28      0.351      0.168\n",
      "               bicycle       4952        337      0.507      0.843      0.801      0.516\n",
      "                  bird       4952        459      0.553      0.749      0.709      0.418\n",
      "                  boat       4952        263      0.284      0.749      0.612      0.322\n",
      "                bottle       4952        469      0.558      0.393      0.432      0.258\n",
      "                   bus       4952        213      0.663      0.793      0.798      0.589\n",
      "                   cat       4952        358      0.731      0.606      0.692      0.423\n",
      "                 chair       4952        756      0.465      0.484      0.468      0.271\n",
      "                   cow       4952        244      0.643      0.347      0.486      0.314\n",
      "           diningtable       4952        206      0.406      0.102      0.226     0.0859\n",
      "                   dog       4952        489      0.642      0.719      0.751      0.462\n",
      "                 horse       4952        348      0.644      0.816      0.831       0.54\n",
      "             motorbike       4952        325      0.682      0.205      0.516      0.263\n",
      "           pottedplant       4952        480      0.184     0.0375     0.0543     0.0247\n",
      "                 sheep       4952        242      0.405      0.727      0.558      0.348\n",
      "                  sofa       4952        239      0.536      0.435      0.497      0.308\n",
      "                 train       4952        282       0.79      0.532      0.685      0.434\n",
      "             tvmonitor       4952        308      0.558       0.26      0.384      0.223\n",
      "Speed: 0.1ms pre-process, 1.5ms inference, 1.1ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp253\u001b[0m\n",
      "Voc\n",
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/val_kitti.yaml, weights=['runs/train/k_v_2oldmodels_openimages5/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3090, 24135MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_openimages summary: 160 layers, 7107223 parameters, 0 gradients, 16.1 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.769      0.185      0.226      0.113\n",
      "                   car       2244       8711      0.694      0.667      0.706      0.355\n",
      "                   van       2244        861      0.703     0.0909      0.267      0.149\n",
      "                 truck       2244        333      0.383      0.264      0.258      0.162\n",
      "                  tram       2244        138          1          0     0.0627     0.0303\n",
      "                person       2244       1286      0.376      0.456      0.332      0.148\n",
      "        person_sitting       2244         89          1          0      0.089     0.0331\n",
      "               cyclist       2244        496          1          0     0.0644     0.0158\n",
      "                  misc       2244        284          1          0     0.0318    0.00893\n",
      "Speed: 0.0ms pre-process, 1.0ms inference, 1.1ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp255\u001b[0m\n",
      "kitti\n"
     ]
    }
   ],
   "source": [
    "# 1e-4 1e-3\n",
    "model = f'runs/train/k_v_2oldmodels_openimages5/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/openimages.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'openimages' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "\n",
    "# Voc\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/val_VOC.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Voc' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "# kitti\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/val_kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'kitti' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5e4187d6-c378-4246-96eb-90017e78d109",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
