{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "f58f27ee-900b-4c08-bd6e-eb8f478297d3",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/VOCKITTI.yaml, weights=['runs/train/exp147/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 160 layers, 7080253 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 image\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032       0.68       0.62      0.652      0.387\n",
      "                   car       4952       1201      0.757       0.85      0.867      0.611\n",
      "                person       4952       4528      0.737      0.788      0.797      0.465\n",
      "             aeroplane       4952        285      0.644      0.712      0.704      0.389\n",
      "               bicycle       4952        337      0.841      0.656      0.762      0.446\n",
      "                  bird       4952        459      0.578      0.551      0.533      0.277\n",
      "                  boat       4952        263       0.45      0.544      0.461      0.234\n",
      "                bottle       4952        469      0.709      0.475      0.526      0.324\n",
      "                   bus       4952        213      0.725      0.643      0.734      0.511\n",
      "                   cat       4952        358      0.768      0.528      0.623      0.369\n",
      "                 chair       4952        756      0.554      0.493      0.496      0.284\n",
      "                   cow       4952        244      0.543      0.701      0.659       0.39\n",
      "           diningtable       4952        206      0.674      0.541      0.605       0.32\n",
      "                   dog       4952        489       0.71      0.479      0.627      0.352\n",
      "                 horse       4952        348      0.707      0.791      0.795      0.493\n",
      "             motorbike       4952        325      0.748      0.726      0.744      0.444\n",
      "           pottedplant       4952        480      0.659      0.383      0.421      0.193\n",
      "                 sheep       4952        242      0.645      0.637      0.637      0.397\n",
      "                  sofa       4952        239      0.769      0.515      0.659      0.411\n",
      "                 train       4952        282      0.701      0.741      0.735      0.419\n",
      "             tvmonitor       4952        308      0.684      0.653       0.66      0.411\n",
      "Speed: 0.1ms pre-process, 1.6ms inference, 1.1ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp199\u001b[0m\n",
      "Test set val successful|ly!\n"
     ]
    }
   ],
   "source": [
    "# Clear-weather (fog-free) training run: evaluate last.pt on the VOC+KITTI test split\n",
    "model = 'runs/train/exp147/weights/last.pt'\n",
    "\n",
    "# Backslash-newlines inside the f-string fold this into a single shell command line\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/VOCKITTI.yaml \\\n",
    "--weights {model} \\\n",
    "--task test && \\\n",
    "echo 'Test set val successfully!' \\\n",
    "\"\n",
    "# IPython `!` runs the interpolated command in a subshell\n",
    "!{val_command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "25bdfda2-0522-43d4-bfb0-6644429b1d35",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti_increment.yaml, weights=['runs/train/exp147/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 160 layers, 7080253 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.932      0.152      0.208      0.112\n",
      "                   car       2244       8711      0.828      0.716       0.79       0.46\n",
      "                   van       2244        861          1          0     0.0635     0.0392\n",
      "                 truck       2244        333          1          0      0.275      0.162\n",
      "                  tram       2244        138          1          0    0.00425     0.0023\n",
      "                person       2244       1286      0.629        0.5      0.495      0.218\n",
      "        person_sitting       2244         89          1          0   0.000794   0.000282\n",
      "               cyclist       2244        496          1          0     0.0147    0.00505\n",
      "                  misc       2244        284          1          0     0.0167    0.00987\n",
      "Speed: 0.0ms pre-process, 0.8ms inference, 1.9ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp198\u001b[0m\n",
      "Test set val successful|ly!\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the same clear-weather weights on the KITTI increment test split\n",
    "model = 'runs/train/exp147/weights/last.pt'\n",
    "\n",
    "# Backslash-newlines inside the f-string fold this into a single shell command line\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti_increment.yaml \\\n",
    "--weights {model} \\\n",
    "--task test && \\\n",
    "echo 'Test set val successfully!' \\\n",
    "\"\n",
    "# IPython `!` runs the interpolated command in a subshell\n",
    "!{val_command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "637ffcd5-daf2-4713-bbd5-e5d579ab35e3",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Using the full set is too much; for later tests only use images/train2007"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "ef973084-2bdd-4880-ab87-5f7116f5db61",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_Lwf: \u001b[0mweights=./runs/train/fog_02/weights/best.pt, cfg=models/yolov5s_VOCKITTI.yaml, data=data/temp_test.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=50, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=True\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2882 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/95f44e9e2f174faa9eb5b6c0d97daed5\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83607  models.yolo.Detect                      [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7089751 parameters, 7089751 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/349 items from runs/train/fog_02/weights/best.pt\n",
      "Overriding model.yaml nc=26 with nc=8\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo.Detect                      [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/train2007.cache... 2501 ima\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/val.cache... 1048 images, 0 b\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.08 anchors/target, 1.000 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/exp27/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/exp27\u001b[0m\n",
      "Starting training for 50 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/49       3.5G     0.1116    0.04275    0.07703         32        640: 1\n",
      "tensor([36.97710], device='cuda:0', grad_fn=<AddBackward0>) tensor(3577.68555, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675    0.00333      0.198    0.00282   0.000678\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/49       3.5G     0.1069    0.04257    0.07529         14        640: 1\n",
      "tensor([29.45456], device='cuda:0', grad_fn=<AddBackward0>) tensor(2877.55933, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675    0.00439      0.273    0.00789    0.00218\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/49       3.5G     0.0979     0.0447    0.07636         33        640: 1\n",
      "tensor([57.64411], device='cuda:0', grad_fn=<AddBackward0>) tensor(5650.19824, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675    0.00609      0.359     0.0274    0.00747\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/49       3.5G    0.08788    0.04869    0.07339         29        640: 1\n",
      "tensor([54.30187], device='cuda:0', grad_fn=<AddBackward0>) tensor(5330.20801, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675    0.00853      0.433     0.0468     0.0141\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/49       3.5G    0.08219    0.04975    0.07413         21        640: 1\n",
      "tensor([56.35284], device='cuda:0', grad_fn=<AddBackward0>) tensor(5536.43457, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675     0.0093      0.484     0.0647     0.0224\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/49       3.5G    0.07919    0.05136    0.07415         21        640: 1\n",
      "tensor([49.82849], device='cuda:0', grad_fn=<AddBackward0>) tensor(4876.21631, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.178      0.118     0.0783     0.0313\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/49       3.5G    0.07681    0.05135    0.07253         37        640: 1\n",
      "tensor([43.85596], device='cuda:0', grad_fn=<AddBackward0>) tensor(4279.93799, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.482     0.0653     0.0967     0.0369\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/49       3.5G    0.07537    0.05072    0.07276         24        640: 1\n",
      "tensor([45.80349], device='cuda:0', grad_fn=<AddBackward0>) tensor(4478.20752, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.603      0.053      0.115     0.0415\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/49       3.5G    0.07351    0.05059    0.07146         13        640: 1\n",
      "tensor([47.97222], device='cuda:0', grad_fn=<AddBackward0>) tensor(4703.92969, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.557     0.0641      0.131     0.0453\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/49       3.5G    0.07241    0.05006    0.07238         22        640: 1\n",
      "tensor([44.11880], device='cuda:0', grad_fn=<AddBackward0>) tensor(4340.58594, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.554     0.0762      0.132     0.0489\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/49       3.5G    0.07165    0.04961     0.0721         32        640: 1\n",
      "tensor([40.43542], device='cuda:0', grad_fn=<AddBackward0>) tensor(3943.64160, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.605     0.0904      0.152     0.0566\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/49       3.5G    0.07096    0.04958    0.07233         24        640: 1\n",
      "tensor([47.30872], device='cuda:0', grad_fn=<AddBackward0>) tensor(4630.38916, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.529      0.106      0.164     0.0636\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/49       3.5G    0.07095    0.04887    0.07287         33        640: 1\n",
      "tensor([35.00758], device='cuda:0', grad_fn=<AddBackward0>) tensor(3399.35864, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.474      0.114      0.176     0.0661\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/49       3.5G    0.06978     0.0496    0.07074         15        640: 1\n",
      "tensor([43.13663], device='cuda:0', grad_fn=<AddBackward0>) tensor(4249.86230, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.461      0.111      0.178     0.0706\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/49       3.5G    0.06872    0.04904    0.07137         15        640: 1\n",
      "tensor([39.32468], device='cuda:0', grad_fn=<AddBackward0>) tensor(3847.92358, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.473      0.147      0.192      0.074\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/49       3.5G     0.0686    0.04934    0.07241         27        640: 1\n",
      "tensor([39.18357], device='cuda:0', grad_fn=<AddBackward0>) tensor(3834.43091, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.452      0.165      0.205     0.0819\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/49       3.5G    0.06854    0.04918     0.0725         16        640: 1\n",
      "tensor([32.80934], device='cuda:0', grad_fn=<AddBackward0>) tensor(3186.25220, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.475      0.179      0.217     0.0838\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/49       3.5G    0.06805    0.04923    0.07131         31        640: 1\n",
      "tensor([36.40209], device='cuda:0', grad_fn=<AddBackward0>) tensor(3536.12671, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.444      0.186      0.227     0.0871\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/49       3.5G    0.06803    0.04886    0.07161         24        640: 1\n",
      "tensor([32.22002], device='cuda:0', grad_fn=<AddBackward0>) tensor(3145.35303, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.436      0.201      0.224     0.0889\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/49       3.5G    0.06747    0.04906    0.07184         28        640: 1\n",
      "tensor([36.39105], device='cuda:0', grad_fn=<AddBackward0>) tensor(3530.29980, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.481      0.225      0.253      0.094\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/49       3.5G    0.06713    0.04818    0.07176          9        640: 1\n",
      "tensor([28.34808], device='cuda:0', grad_fn=<AddBackward0>) tensor(2743.65894, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.456      0.227      0.251     0.0952\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/49       3.5G    0.06685    0.04888     0.0712         37        640: 1\n",
      "tensor([30.61442], device='cuda:0', grad_fn=<AddBackward0>) tensor(2958.21802, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.431      0.232      0.248     0.0972\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/49       3.5G    0.06651    0.04819     0.0715         26        640: 1\n",
      "tensor([27.05368], device='cuda:0', grad_fn=<AddBackward0>) tensor(2601.45557, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.491       0.25      0.271      0.105\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/49       3.5G    0.06614    0.04835    0.07205         25        640: 1\n",
      "tensor([26.34585], device='cuda:0', grad_fn=<AddBackward0>) tensor(2553.30371, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.469      0.256      0.272      0.106\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/49       3.5G    0.06577    0.04726    0.07153         30        640: 1\n",
      "tensor([24.26698], device='cuda:0', grad_fn=<AddBackward0>) tensor(2336.52246, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.486      0.259      0.281      0.111\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/49       3.5G    0.06617    0.04696      0.072         19        640: 1\n",
      "tensor([33.66571], device='cuda:0', grad_fn=<AddBackward0>) tensor(3280.81665, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.474      0.259      0.275       0.11\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/49       3.5G    0.06572    0.04728     0.0718         14        640: 1\n",
      "tensor([27.15462], device='cuda:0', grad_fn=<AddBackward0>) tensor(2637.37915, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.474      0.259      0.277      0.111\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/49       3.5G    0.06575      0.048    0.07147         38        640: 1\n",
      "tensor([26.55750], device='cuda:0', grad_fn=<AddBackward0>) tensor(2547.25513, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.491      0.261      0.284      0.112\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/49       3.5G    0.06552    0.04735    0.07218         20        640: 1\n",
      "tensor([21.73260], device='cuda:0', grad_fn=<AddBackward0>) tensor(2083.47461, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.511      0.269      0.297      0.118\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/49       3.5G    0.06559    0.04822    0.07175         30        640: 1\n",
      "tensor([29.96856], device='cuda:0', grad_fn=<AddBackward0>) tensor(2889.50708, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.505      0.272      0.299      0.119\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/49       3.5G    0.06568     0.0476     0.0717         21        640: 1\n",
      "tensor([25.35637], device='cuda:0', grad_fn=<AddBackward0>) tensor(2444.72583, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.483      0.272      0.299      0.122\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/49       3.5G    0.06536    0.04657    0.07164         29        640: 1\n",
      "tensor([28.65964], device='cuda:0', grad_fn=<AddBackward0>) tensor(2780.02441, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.507      0.286      0.304      0.126\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/49       3.5G    0.06517    0.04821    0.07202         28        640: 1\n",
      "tensor([20.34182], device='cuda:0', grad_fn=<AddBackward0>) tensor(1958.19531, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.519      0.287      0.315      0.127\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/49       3.5G    0.06429    0.04726    0.07243         19        640: 1\n",
      "tensor([16.96311], device='cuda:0', grad_fn=<AddBackward0>) tensor(1623.60278, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.504      0.282      0.311      0.126\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/49       3.5G    0.06509    0.04743    0.07258         22        640: 1\n",
      "tensor([15.26826], device='cuda:0', grad_fn=<AddBackward0>) tensor(1444.67871, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.513      0.298      0.319      0.132\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/49       3.5G    0.06483    0.04825    0.07176         32        640: 1\n",
      "tensor([19.07119], device='cuda:0', grad_fn=<AddBackward0>) tensor(1809.22021, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.511      0.286      0.319      0.132\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/49       3.5G    0.06423    0.04819    0.07137         18        640: 1\n",
      "tensor([21.02203], device='cuda:0', grad_fn=<AddBackward0>) tensor(2014.21851, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.479      0.284      0.315      0.132\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/49       3.5G    0.06424    0.04794    0.07239         33        640: 1\n",
      "tensor([14.81208], device='cuda:0', grad_fn=<AddBackward0>) tensor(1375.17700, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.515      0.299      0.322      0.134\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/49       3.5G     0.0647    0.04822    0.07194         38        640: 1\n",
      "tensor([12.51960], device='cuda:0', grad_fn=<AddBackward0>) tensor(1154.92456, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.539      0.302       0.33      0.143\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/49       3.5G    0.06416    0.04792    0.07259         46        640: 1\n",
      "tensor([11.27448], device='cuda:0', grad_fn=<AddBackward0>) tensor(1025.49707, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.528      0.309      0.327      0.139\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/49       3.5G    0.06464    0.04682    0.07281         27        640: 1\n",
      "tensor([12.75064], device='cuda:0', grad_fn=<AddBackward0>) tensor(1197.91309, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.525      0.295       0.33      0.141\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/49       3.5G    0.06418    0.04824    0.07197         18        640: 1\n",
      "tensor([12.32114], device='cuda:0', grad_fn=<AddBackward0>) tensor(1144.06177, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.547      0.304      0.332      0.142\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/49       3.5G    0.06442    0.04649    0.07292         24        640: 1\n",
      "tensor([11.66621], device='cuda:0', grad_fn=<AddBackward0>) tensor(1066.83313, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.544      0.306      0.336      0.144\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/49       3.5G    0.06434    0.04866     0.0721         21        640: 1\n",
      "tensor([8.58394], device='cuda:0', grad_fn=<AddBackward0>) tensor(781.29254, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.536      0.315      0.335      0.143\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/49       3.5G    0.06428    0.04809    0.07313         28        640: 1\n",
      "tensor([9.61071], device='cuda:0', grad_fn=<AddBackward0>) tensor(885.58667, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.531      0.317      0.339      0.146\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/49       3.5G    0.06453    0.04759    0.07325         34        640: 1\n",
      "tensor([10.27733], device='cuda:0', grad_fn=<AddBackward0>) tensor(937.09717, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.555       0.31      0.342      0.149\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/49       3.5G    0.06422      0.047    0.07343         29        640: 1\n",
      "tensor([8.42295], device='cuda:0', grad_fn=<AddBackward0>) tensor(745.43768, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.542       0.32      0.342      0.146\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/49       3.5G    0.06399    0.04747    0.07412         23        640: 1\n",
      "tensor([12.24125], device='cuda:0', grad_fn=<AddBackward0>) tensor(1132.72070, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.532      0.318      0.341      0.147\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/49       3.5G    0.06407    0.04738    0.07306         28        640: 1\n",
      "tensor([6.66237], device='cuda:0', grad_fn=<AddBackward0>) tensor(585.62811, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.536      0.321      0.347      0.149\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/49       3.5G    0.06373     0.0468    0.07323         19        640: 1\n",
      "tensor([7.35775], device='cuda:0', grad_fn=<AddBackward0>) tensor(657.86066, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.545      0.322      0.346      0.149\n",
      "\n",
      "50 epochs completed in 0.391 hours.\n",
      "Optimizer stripped from runs/train/exp27/weights/last.pt, 14.4MB\n",
      "Optimizer stripped from runs/train/exp27/weights/best.pt, 14.4MB\n",
      "\n",
      "Validating runs/train/exp27/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.541      0.322      0.346      0.149\n",
      "                   car       1048       4012      0.738      0.285      0.418      0.227\n",
      "                   van       1048        431      0.669       0.32      0.418      0.213\n",
      "                 truck       1048        166       0.45      0.313      0.313      0.132\n",
      "                  tram       1048         56      0.547      0.589      0.518      0.209\n",
      "                person       1048        618      0.433      0.316      0.337      0.136\n",
      "        person_sitting       1048         20      0.341       0.45      0.301       0.12\n",
      "               cyclist       1048        234       0.54      0.145      0.218     0.0634\n",
      "                  misc       1048        138      0.612      0.159      0.247     0.0925\n",
      "Results saved to \u001b[1mruns/train/exp27\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/95f44e9e2f174faa9eb5b6c0d97daed5\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_f1                         : 0.4116413312957764\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_false_positives            : 406.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5                     : 0.4176008625170813\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5:.95                 : 0.2266507218847847\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_precision                  : 0.7382292222548457\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_recall                     : 0.285387655851265\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_support                    : 4012\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_true_positives             : 1145.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_f1                     : 0.2289691380014089\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_false_positives        : 29.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_mAP@.5                 : 0.21818752244663336\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_mAP@.5:.95             : 0.06338363605193315\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_precision              : 0.5398260182011675\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_recall                 : 0.1452991452991453\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_support                : 234\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_true_positives         : 34.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [785]                     : (7.907039642333984, 346.5867614746094)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [100]          : (0.0028247830261713906, 0.34691432501706426)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [100]     : (0.0006780904115219227, 0.14914351739498705)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [100]        : (0.003333728981864366, 0.6051005512903556)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [100]           : (0.053007950208455994, 0.48416021315978786)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_f1                        : 0.25297520116984956\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_false_positives           : 14.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_mAP@.5                    : 0.24667797331909536\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_mAP@.5:.95                : 0.09250661279577482\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_precision                 : 0.6123001308186494\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_recall                    : 0.15942028985507245\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_support                   : 138\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_true_positives            : 22.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_f1                      : 0.3649693175793947\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_false_positives         : 256.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5                  : 0.33723662748243194\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5:.95              : 0.13592414964610694\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_precision               : 0.43277267412768766\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_recall                  : 0.3155339805825243\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_f1              : 0.3878836947442638\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_false_positives : 17.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_mAP@.5          : 0.3010321026472294\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_mAP@.5:.95      : 0.11998366193544716\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_precision       : 0.3408359797248686\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_recall          : 0.45\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_support         : 20\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_true_positives  : 9.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_support                 : 618\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_true_positives          : 195.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [100]           : (0.06373090296983719, 0.11162934452295303)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [100]           : (0.0707421749830246, 0.07702714204788208)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [100]           : (0.0425729863345623, 0.05136047303676605)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_f1                        : 0.5673901560131023\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_false_positives           : 27.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_mAP@.5                    : 0.518237132378164\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_mAP@.5:.95                : 0.20858442963634483\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_precision                 : 0.5470634137157002\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_recall                    : 0.5892857142857143\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_support                   : 56\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_true_positives            : 33.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_f1                       : 0.3694931356153941\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_false_positives          : 63.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5                   : 0.3132483096247045\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5:.95               : 0.13177080454661072\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_precision                : 0.45034643338991165\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_recall                   : 0.3132530120481928\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_support                  : 166\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_true_positives           : 52.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [100]             : (0.06410126388072968, 0.11522421985864639)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [100]             : (0.014560237526893616, 0.0437193289399147)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [100]             : (0.07259530574083328, 0.1173403188586235)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_f1                         : 0.4330125607322207\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_false_positives            : 68.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5                     : 0.41849831395754533\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5:.95                 : 0.21348520577763144\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_precision                  : 0.6686211562155467\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_recall                     : 0.32018561484918795\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_support                    : 431\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_true_positives             : 138.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [100]                    : (0.0004960000000000005, 0.07019108280254777)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [100]                    : (0.0004960000000000005, 0.009583609341825903)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [100]                    : (0.0004960000000000005, 0.009583609341825903)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/yolov5/95f44e9e2f174faa9eb5b6c0d97daed5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.1625\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/exp27\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (1.94 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     model graph                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n"
     ]
    }
   ],
   "source": [
    "# Train with LwF (Learning-without-Forgetting) distillation enabled.\n",
    "# NOTE(review): the saved output above reports Lwf_lambda=0.0005, but this command\n",
    "# passes --Lwf_lambda 1e-2 — the stored output is stale relative to this source.\n",
    "# Re-run the cell to regenerate results that match these arguments.\n",
    "# (f-string prefix removed: the string has no placeholders, and a stray {..}\n",
    "# in a shell argument would otherwise be interpolated silently.)\n",
    "command = \"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_Lwf.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_VOCKITTI.yaml \\\n",
    "--data data/temp_test.yaml \\\n",
    "--epochs 50 \\\n",
    "--weights ./runs/train/fog_02/weights/best.pt \\\n",
    "--Lwf_enable \\\n",
    "--Lwf_temperature 1.0 \\\n",
    "--Lwf_lambda 1e-2 \\\n",
    "\"\"\"\n",
    "!{command}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ef4f78ec-9e3e-4464-9e63-56db6cda5d70",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d40159b0-7d5b-4314-8459-2e1f1e7768d8",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "221882ff-50b5-405a-956d-4d429a087488",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_Lwf: \u001b[0mweights=./runs/train/fog_02/weights/best.pt, cfg=models/yolov5s_VOCKITTI.yaml, data=data/temp_test.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=50, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=True, Lwf_lambda=0.0005, Lwf_temperature=1.0\n",
      "Command 'git fetch ultralytics' timed out after 5 seconds\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/b77a3dbf789d45da98c5a883eb2169c5\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83607  models.yolo.Detect                      [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7089751 parameters, 7089751 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/349 items from runs/train/fog_02/weights/best.pt\n",
      "Overriding model.yaml nc=26 with nc=8\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo.Detect                      [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/train2007.cache... 2501 ima\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/val.cache... 1048 images, 0 b\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.08 anchors/target, 1.000 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/exp30/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/exp30\u001b[0m\n",
      "Starting training for 50 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/49       3.5G     0.1122    0.04338    0.08232        112        640:  error: RPC failed; curl 16 Error in the HTTP2 framing layer\n",
      "fatal: expected flush after ref listing\n",
      "       0/49       3.5G    0.08867    0.04875    0.07403         32        640: 1\n",
      "tensor([3.03726], device='cuda:0', grad_fn=<AddBackward0>) tensor(3895.98242, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675     0.0207      0.341     0.0502     0.0147\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/49       3.5G    0.07302    0.05034    0.06772         14        640: 1\n",
      "tensor([2.66903], device='cuda:0', grad_fn=<AddBackward0>) tensor(4221.94678, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675     0.0148      0.487      0.101     0.0398\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/49       3.5G    0.06638    0.04741    0.06512         33        640: 1\n",
      "tensor([3.48934], device='cuda:0', grad_fn=<AddBackward0>) tensor(5020.16699, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.763      0.116      0.202     0.0837\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/49       3.5G    0.06051    0.04557    0.06036         29        640: 1\n",
      "tensor([3.97402], device='cuda:0', grad_fn=<AddBackward0>) tensor(6378.38477, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.632      0.252      0.345      0.151\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/49       3.5G    0.05663    0.04365    0.05892         21        640: 1\n",
      "tensor([3.93354], device='cuda:0', grad_fn=<AddBackward0>) tensor(6258.83398, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.614      0.375      0.419      0.195\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/49       3.5G    0.05485    0.04363    0.05681         21        640: 1\n",
      "tensor([3.64905], device='cuda:0', grad_fn=<AddBackward0>) tensor(5692.17334, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.598      0.425      0.478      0.223\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/49       3.5G    0.05307     0.0427    0.05453         37        640: 1\n",
      "tensor([3.04494], device='cuda:0', grad_fn=<AddBackward0>) tensor(4460.13574, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.631      0.441      0.503      0.237\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/49       3.5G    0.05158    0.04211    0.05314         24        640: 1\n",
      "tensor([3.34154], device='cuda:0', grad_fn=<AddBackward0>) tensor(5045.34863, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.638      0.446      0.526      0.259\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/49       3.5G    0.05057    0.04155     0.0508         13        640: 1\n",
      "tensor([3.48110], device='cuda:0', grad_fn=<AddBackward0>) tensor(5452.91113, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.643      0.455      0.534      0.251\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/49       3.5G    0.04988    0.04109    0.05024         22        640: 1\n",
      "tensor([2.80137], device='cuda:0', grad_fn=<AddBackward0>) tensor(4612.17627, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.653       0.49      0.565      0.271\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/49       3.5G    0.04905     0.0407    0.04855         32        640: 1\n",
      "tensor([3.11077], device='cuda:0', grad_fn=<AddBackward0>) tensor(4861.05811, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.681      0.482      0.582      0.284\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/49       3.5G    0.04879    0.04085    0.04826         24        640: 1\n",
      "tensor([3.27139], device='cuda:0', grad_fn=<AddBackward0>) tensor(5111.90820, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.629      0.485      0.559      0.275\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/49       3.5G    0.04835    0.04015     0.0474         33        640: 1\n",
      "tensor([2.66139], device='cuda:0', grad_fn=<AddBackward0>) tensor(3914.34424, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.639      0.533      0.586       0.28\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/49       3.5G    0.04753     0.0407    0.04515         15        640: 1\n",
      "tensor([2.69769], device='cuda:0', grad_fn=<AddBackward0>) tensor(4476.50635, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.636      0.485      0.564      0.267\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/49       3.5G    0.04684    0.04028    0.04491         15        640: 1\n",
      "tensor([2.89158], device='cuda:0', grad_fn=<AddBackward0>) tensor(4510.27539, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.69      0.493      0.567      0.275\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/49       3.5G    0.04652    0.04057     0.0442         27        640: 1\n",
      "tensor([2.62716], device='cuda:0', grad_fn=<AddBackward0>) tensor(4190.68701, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.632      0.519      0.589      0.287\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/49       3.5G    0.04627    0.04045    0.04364         16        640: 1\n",
      "tensor([2.61441], device='cuda:0', grad_fn=<AddBackward0>) tensor(3935.97510, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.623      0.549        0.6      0.291\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/49       3.5G    0.04609    0.04039    0.04251         31        640: 1\n",
      "tensor([2.79281], device='cuda:0', grad_fn=<AddBackward0>) tensor(4203.66895, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.688      0.511      0.613      0.306\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/49       3.5G    0.04603    0.04023    0.04134         24        640: 1\n",
      "tensor([2.43849], device='cuda:0', grad_fn=<AddBackward0>) tensor(3773.51367, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.66       0.53      0.604      0.289\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/49       3.5G    0.04538    0.04002    0.04067         28        640: 1\n",
      "tensor([3.06703], device='cuda:0', grad_fn=<AddBackward0>) tensor(4331.02979, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.629      0.534      0.599      0.295\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/49       3.5G     0.0457    0.03946    0.04057          9        640: 1\n",
      "tensor([2.51405], device='cuda:0', grad_fn=<AddBackward0>) tensor(3677.88135, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.631      0.531       0.59      0.288\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/49       3.5G     0.0449    0.03993     0.0394         37        640: 1\n",
      "tensor([2.53804], device='cuda:0', grad_fn=<AddBackward0>) tensor(3721.51318, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.648      0.518      0.591       0.28\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/49       3.5G    0.04447     0.0395    0.03997         26        640: 1\n",
      "tensor([2.35004], device='cuda:0', grad_fn=<AddBackward0>) tensor(3400.77417, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.649      0.561      0.605      0.285\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/49       3.5G    0.04418    0.03989    0.03978         25        640: 1\n",
      "tensor([2.45016], device='cuda:0', grad_fn=<AddBackward0>) tensor(3777.17505, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.672      0.558      0.634      0.306\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/49       3.5G    0.04405    0.03857    0.03889         30        640: 1\n",
      "tensor([2.23148], device='cuda:0', grad_fn=<AddBackward0>) tensor(3204.89453, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.673      0.563      0.629      0.293\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/49       3.5G    0.04386    0.03833    0.03816         19        640: 1\n",
      "tensor([2.45290], device='cuda:0', grad_fn=<AddBackward0>) tensor(3836.87524, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.629      0.561      0.605      0.282\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/49       3.5G    0.04377     0.0388    0.03811         14        640: 1\n",
      "tensor([2.25091], device='cuda:0', grad_fn=<AddBackward0>) tensor(3499.70117, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.659      0.574      0.628      0.298\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/49       3.5G    0.04353    0.03905    0.03669         38        640: 1\n",
      "tensor([2.40228], device='cuda:0', grad_fn=<AddBackward0>) tensor(3423.27246, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.689      0.561      0.643      0.309\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/49       3.5G    0.04299    0.03867    0.03745         20        640: 1\n",
      "tensor([1.90263], device='cuda:0', grad_fn=<AddBackward0>) tensor(2766.26587, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.686      0.577      0.653      0.313\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/49       3.5G    0.04339    0.03952    0.03644         30        640: 1\n",
      "tensor([2.80380], device='cuda:0', grad_fn=<AddBackward0>) tensor(4034.83325, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.687      0.568      0.655      0.316\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/49       3.5G    0.04339    0.03883    0.03641         21        640: 1\n",
      "tensor([2.42336], device='cuda:0', grad_fn=<AddBackward0>) tensor(3730.29004, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.685      0.573      0.644      0.303\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/49       3.5G    0.04269    0.03808     0.0363         29        640: 1\n",
      "tensor([2.43588], device='cuda:0', grad_fn=<AddBackward0>) tensor(3769.41260, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.681      0.545      0.641      0.326\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/49       3.5G    0.04279    0.03944    0.03596         28        640: 1\n",
      "tensor([1.99616], device='cuda:0', grad_fn=<AddBackward0>) tensor(3085.34985, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.705      0.574      0.652      0.318\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/49       3.5G    0.04238     0.0387    0.03657         19        640: 1\n",
      "tensor([1.65820], device='cuda:0', grad_fn=<AddBackward0>) tensor(2328.60840, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.683      0.567      0.643      0.314\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/49       3.5G    0.04274    0.03854    0.03551         22        640: 1\n",
      "tensor([1.71241], device='cuda:0', grad_fn=<AddBackward0>) tensor(2513.43457, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.708      0.559      0.643      0.298\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/49       3.5G    0.04222     0.0392    0.03434         32        640: 1\n",
      "tensor([1.97396], device='cuda:0', grad_fn=<AddBackward0>) tensor(2808.60400, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.692      0.576      0.651      0.319\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/49       3.5G    0.04184    0.03894    0.03452         18        640: 1\n",
      "tensor([2.20118], device='cuda:0', grad_fn=<AddBackward0>) tensor(3146.89478, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.665      0.557      0.625      0.306\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/49       3.5G     0.0415    0.03878    0.03479         33        640: 1\n",
      "tensor([1.76865], device='cuda:0', grad_fn=<AddBackward0>) tensor(2341.62256, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.681      0.589      0.653      0.319\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/49       3.5G    0.04131    0.03907    0.03383         38        640: 1\n",
      "tensor([1.67620], device='cuda:0', grad_fn=<AddBackward0>) tensor(2167.72144, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.694        0.6      0.667       0.32\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/49       3.5G    0.04136    0.03878    0.03394         46        640: 1\n",
      "tensor([1.66080], device='cuda:0', grad_fn=<AddBackward0>) tensor(2094.25879, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.69      0.575      0.656      0.317\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/49       3.5G    0.04171    0.03781    0.03362         27        640: 1\n",
      "tensor([1.55313], device='cuda:0', grad_fn=<AddBackward0>) tensor(2162.12817, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.712      0.596      0.668      0.333\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/49       3.5G    0.04119    0.03913    0.03374         18        640: 1\n",
      "tensor([1.49815], device='cuda:0', grad_fn=<AddBackward0>) tensor(1991.08740, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.702      0.594      0.678      0.326\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/49       3.5G     0.0412    0.03758    0.03458         24        640: 1\n",
      "tensor([1.64257], device='cuda:0', grad_fn=<AddBackward0>) tensor(1950.27783, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.72        0.6      0.682      0.325\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/49       3.5G    0.04134    0.03932    0.03376         21        640: 1\n",
      "tensor([1.34967], device='cuda:0', grad_fn=<AddBackward0>) tensor(1697.11084, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.712      0.603      0.678      0.334\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/49       3.5G     0.0409    0.03891    0.03335         28        640: 1\n",
      "tensor([1.40998], device='cuda:0', grad_fn=<AddBackward0>) tensor(1803.39368, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.717      0.604      0.674      0.327\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/49       3.5G    0.04137    0.03862     0.0336         34        640: 1\n",
      "tensor([1.50877], device='cuda:0', grad_fn=<AddBackward0>) tensor(1867.42761, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.722        0.6      0.676      0.318\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/49       3.5G    0.04105    0.03819    0.03364         29        640: 1\n",
      "tensor([1.45606], device='cuda:0', grad_fn=<AddBackward0>) tensor(1578.07141, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.71      0.596      0.675      0.323\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/49       3.5G    0.04051     0.0382    0.03332         23        640: 1\n",
      "tensor([1.55070], device='cuda:0', grad_fn=<AddBackward0>) tensor(2001.81299, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.687      0.588      0.665      0.324\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/49       3.5G    0.04034    0.03815    0.03272         28        640: 1\n",
      "tensor([1.21246], device='cuda:0', grad_fn=<AddBackward0>) tensor(1358.04138, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.698      0.607      0.671      0.327\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/49       3.5G     0.0404    0.03783    0.03373         19        640: 1\n",
      "tensor([1.18326], device='cuda:0', grad_fn=<AddBackward0>) tensor(1399.40320, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.713        0.6      0.678      0.333\n",
      "\n",
      "50 epochs completed in 0.467 hours.\n",
      "Optimizer stripped from runs/train/exp30/weights/last.pt, 14.4MB\n",
      "Optimizer stripped from runs/train/exp30/weights/best.pt, 14.4MB\n",
      "\n",
      "Validating runs/train/exp30/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.712      0.604      0.679      0.334\n",
      "                   car       1048       4012      0.695      0.844      0.845      0.526\n",
      "                   van       1048        431      0.853      0.691      0.813      0.457\n",
      "                 truck       1048        166      0.943      0.735      0.848      0.435\n",
      "                  tram       1048         56       0.81      0.763      0.834      0.404\n",
      "                person       1048        618      0.421      0.652      0.563      0.245\n",
      "        person_sitting       1048         20      0.431        0.4      0.406      0.153\n",
      "               cyclist       1048        234      0.686      0.436      0.528      0.162\n",
      "                  misc       1048        138      0.859      0.309      0.595      0.292\n",
      "Results saved to \u001b[1mruns/train/exp30\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/b77a3dbf789d45da98c5a883eb2169c5\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_f1                         : 0.762262798042558\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_false_positives            : 1487.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5                     : 0.8449166424595804\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5:.95                 : 0.5256315844003774\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_precision                  : 0.6948848284358677\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_recall                     : 0.844110062205775\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_support                    : 4012\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_true_positives             : 3387.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_f1                     : 0.5331712133181717\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_false_positives        : 47.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_mAP@.5                 : 0.5280546350119666\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_mAP@.5:.95             : 0.1622791749192952\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_precision              : 0.6863311495280752\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_recall                 : 0.4358974358974359\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_support                : 234\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_true_positives         : 102.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [785]                     : (1.6425731182098389, 21.034608840942383)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [100]          : (0.0502482851050108, 0.6821248552551233)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [100]     : (0.014704541115904097, 0.3341912554732781)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [100]        : (0.014826538173427491, 0.7628565545960487)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [100]           : (0.11642804068741311, 0.6073633969702973)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_f1                        : 0.4548820159994403\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_false_positives           : 7.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_mAP@.5                    : 0.5948849293447189\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_mAP@.5:.95                : 0.2917541269111375\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_precision                 : 0.8591089791089791\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_recall                    : 0.3093343673053817\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_support                   : 138\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_true_positives            : 43.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_f1                      : 0.5120094497926118\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_false_positives         : 553.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5                  : 0.5625760810763305\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5:.95              : 0.24502768706743283\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_precision               : 0.4214644874161699\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_recall                  : 0.6521035598705501\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_f1              : 0.4148807012318818\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_false_positives : 11.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_mAP@.5          : 0.4058891538328039\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_mAP@.5:.95      : 0.15314570554032464\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_precision       : 0.43091135921670143\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_recall          : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_support         : 20\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_true_positives  : 8.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_support                 : 618\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_true_positives          : 403.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [100]           : (0.04033707082271576, 0.08867204934358597)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [100]           : (0.032723963260650635, 0.0740334764122963)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [100]           : (0.03757818415760994, 0.050338029861450195)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_f1                        : 0.7862238723020324\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_false_positives           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_mAP@.5                    : 0.8338196424337857\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_mAP@.5:.95                : 0.40400788400442994\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_precision                 : 0.8104211238721282\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_recall                    : 0.7634296739135449\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_support                   : 56\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_true_positives            : 43.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_f1                       : 0.8261264182790903\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_false_positives          : 7.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5                   : 0.8484298471320624\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5:.95               : 0.43462413632543984\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_precision                : 0.9431459840402656\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_recall                   : 0.7349397590361446\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_support                  : 166\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_true_positives           : 122.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [100]             : (0.04010298103094101, 0.09816698729991913)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [100]             : (0.013375272043049335, 0.04232987388968468)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [100]             : (0.06025245040655136, 0.11762214452028275)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_f1                         : 0.7636097645667237\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_false_positives            : 52.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5                     : 0.8131067249834961\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5:.95                 : 0.45712232857181256\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_precision                  : 0.8526383993972061\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_recall                     : 0.691415313225058\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_support                    : 431\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_true_positives             : 298.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [100]                    : (0.0004960000000000005, 0.07019108280254777)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [100]                    : (0.0004960000000000005, 0.009583609341825903)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [100]                    : (0.0004960000000000005, 0.009583609341825903)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/yolov5/b77a3dbf789d45da98c5a883eb2169c5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.1625\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/exp30\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (2.08 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     model graph                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Please wait for assets to finish uploading (timeout is 10800 seconds)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m All assets have been sent, waiting for delivery confirmation\n"
     ]
    }
   ],
   "source": [
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_Lwf.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_VOCKITTI.yaml \\\n",
    "--data data/temp_test.yaml \\\n",
    "--epochs 50 \\\n",
    "--weights ./runs/train/fog_02/weights/best.pt \\\n",
    "--Lwf_enable \\\n",
    "--Lwf_temperature 1.0 \\\n",
    "--Lwf_lambda 5e-4 \\\n",
    "\"\"\"\n",
    "!{command}\n",
    "# --weights ./runs/train/exp3/weights/best.pt \\\n",
    "# 1.0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "75181608-5d78-42ac-b52a-6c59f9e5ad67",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/VOCKITTI.yaml, weights=['runs/train/exp30/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 image\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032       0.39      0.347      0.313      0.147\n",
      "                   car       4952       1201      0.634      0.707      0.715      0.449\n",
      "                person       4952       4528      0.591      0.607      0.602      0.278\n",
      "             aeroplane       4952        285      0.348      0.389      0.308      0.119\n",
      "               bicycle       4952        337      0.506      0.436      0.449      0.217\n",
      "                  bird       4952        459      0.284      0.185      0.147     0.0607\n",
      "                  boat       4952        263      0.193      0.137      0.119     0.0473\n",
      "                bottle       4952        469      0.366      0.275      0.214     0.0885\n",
      "                   bus       4952        213      0.427       0.46      0.383      0.242\n",
      "                   cat       4952        358      0.379      0.271      0.275      0.106\n",
      "                 chair       4952        756      0.432      0.233      0.223     0.0914\n",
      "                   cow       4952        244       0.37       0.41       0.31      0.173\n",
      "           diningtable       4952        206      0.364      0.184      0.187     0.0576\n",
      "                   dog       4952        489      0.347       0.11      0.195     0.0836\n",
      "                 horse       4952        348      0.433      0.494      0.416      0.155\n",
      "             motorbike       4952        325      0.467      0.511      0.472      0.213\n",
      "           pottedplant       4952        480      0.264      0.252      0.196     0.0682\n",
      "                 sheep       4952        242      0.309       0.38      0.277      0.148\n",
      "                  sofa       4952        239      0.385      0.117      0.145     0.0702\n",
      "                 train       4952        282      0.368      0.472      0.366      0.141\n",
      "             tvmonitor       4952        308      0.324      0.302      0.254      0.132\n",
      "Speed: 0.1ms pre-process, 1.6ms inference, 1.4ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp148\u001b[0m\n",
      "Test set val successful|ly!\n"
     ]
    }
   ],
   "source": [
    "# 这是无雾训练集\n",
    "model = f'runs/train/exp30/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/VOCKITTI.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successful|ly!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "65f73e8b-39e8-4e6b-aa51-287a4ae50eec",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "09a36d04-7064-4e98-a43e-df1b0a55896a",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "92a7aa43-34fd-40c5-9fb7-f538cfb3a45d",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "274ee360-ba9b-4f7d-915b-02d9b1645e3f",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "22120e24-c5af-4807-a5f8-78af8ae8e626",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_Lwf: \u001b[0mweights=./runs/train/fog_02/weights/best.pt, cfg=models/yolov5s_VOCKITTI.yaml, data=data/temp_test.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=50, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=True, Lwf_lambda=5e-05, Lwf_temperature=1.0\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2882 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/9e2aa907f14b472c800c4ee331543c15\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83607  models.yolo.Detect                      [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7089751 parameters, 7089751 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/349 items from runs/train/fog_02/weights/best.pt\n",
      "Overriding model.yaml nc=26 with nc=8\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo.Detect                      [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/train2007.cache... 2501 ima\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/val.cache... 1048 images, 0 b\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.08 anchors/target, 1.000 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/exp34/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/exp34\u001b[0m\n",
      "Starting training for 50 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/49      3.51G    0.08283    0.04915    0.07221         32        640: 1\n",
      "tensor([1.49701], device='cuda:0', grad_fn=<AddBackward0>) tensor(10539.90430, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675     0.0407      0.192     0.0545     0.0276\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/49      3.51G    0.06116    0.04429     0.0578         14        640: 1\n",
      "tensor([0.90606], device='cuda:0', grad_fn=<AddBackward0>) tensor(8701.10938, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.896      0.104       0.16     0.0825\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/49      3.51G    0.05613    0.03997    0.05305         33        640: 1\n",
      "tensor([1.23270], device='cuda:0', grad_fn=<AddBackward0>) tensor(8496.83301, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.785      0.164      0.285      0.123\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/49      3.51G    0.05164    0.03843     0.0461         29        640: 1\n",
      "tensor([1.17158], device='cuda:0', grad_fn=<AddBackward0>) tensor(10722.27148, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.783      0.233      0.374      0.157\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/49      3.51G    0.04881    0.03711    0.04068         21        640: 1\n",
      "tensor([1.23779], device='cuda:0', grad_fn=<AddBackward0>) tensor(10998.97363, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.738      0.182      0.366      0.162\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/49      3.51G     0.0469    0.03728    0.03551         21        640: 1\n",
      "tensor([1.19735], device='cuda:0', grad_fn=<AddBackward0>) tensor(11396.13184, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.911      0.134      0.305      0.129\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/49      3.51G    0.04552    0.03631    0.03212         37        640: 1\n",
      "tensor([1.18664], device='cuda:0', grad_fn=<AddBackward0>) tensor(10292.86230, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.914      0.143      0.324      0.152\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/49      3.51G    0.04371    0.03552    0.02962         24        640: 1\n",
      "tensor([1.19638], device='cuda:0', grad_fn=<AddBackward0>) tensor(11927.24316, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.906      0.136      0.288      0.138\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/49      3.51G    0.04303    0.03475    0.02689         13        640: 1\n",
      "tensor([1.20691], device='cuda:0', grad_fn=<AddBackward0>) tensor(11102.20215, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.912      0.142      0.287       0.13\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/49      3.51G    0.04165    0.03425     0.0259         22        640: 1\n",
      "tensor([0.88256], device='cuda:0', grad_fn=<AddBackward0>) tensor(10590.06543, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.924      0.141      0.295      0.147\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/49      3.51G    0.04129     0.0334    0.02414         32        640: 1\n",
      "tensor([1.13837], device='cuda:0', grad_fn=<AddBackward0>) tensor(11492.26758, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.916      0.137      0.287      0.143\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/49      3.51G     0.0404    0.03331    0.02325         24        640: 1\n",
      "tensor([1.08149], device='cuda:0', grad_fn=<AddBackward0>) tensor(11913.55859, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.92      0.141      0.296      0.142\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/49      3.51G    0.04018    0.03265    0.02176         33        640: 1\n",
      "tensor([0.99868], device='cuda:0', grad_fn=<AddBackward0>) tensor(9736.87598, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.922       0.14      0.267      0.131\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/49      3.51G    0.03912    0.03269    0.01961         15        640: 1\n",
      "tensor([0.88958], device='cuda:0', grad_fn=<AddBackward0>) tensor(11464.89941, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.923      0.141        0.3      0.142\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/49      3.51G    0.03827     0.0324    0.01933         15        640: 1\n",
      "tensor([0.99560], device='cuda:0', grad_fn=<AddBackward0>) tensor(11022.83008, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.926      0.145      0.273      0.135\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/49      3.51G    0.03784    0.03206    0.01893         27        640: 1\n",
      "tensor([0.94770], device='cuda:0', grad_fn=<AddBackward0>) tensor(10989.14062, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.89      0.155      0.284      0.137\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/49      3.51G     0.0374    0.03189    0.01783         16        640: 1\n",
      "tensor([0.94155], device='cuda:0', grad_fn=<AddBackward0>) tensor(10610.55664, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.925      0.139      0.253      0.119\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/49      3.51G    0.03696    0.03143    0.01703         31        640: 1\n",
      "tensor([0.97820], device='cuda:0', grad_fn=<AddBackward0>) tensor(10455.63574, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.923      0.144      0.262      0.131\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/49      3.51G    0.03662    0.03136    0.01692         24        640: 1\n",
      "tensor([0.89955], device='cuda:0', grad_fn=<AddBackward0>) tensor(10637.80859, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.924      0.143      0.277      0.138\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/49      3.51G    0.03588    0.03076    0.01576         28        640: 1\n",
      "tensor([1.20599], device='cuda:0', grad_fn=<AddBackward0>) tensor(12050.11914, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.927      0.143      0.282      0.131\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/49      3.51G    0.03559    0.03029    0.01522          9        640: 1\n",
      "tensor([1.08348], device='cuda:0', grad_fn=<AddBackward0>) tensor(10504.95605, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.931      0.139      0.296       0.14\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/49      3.51G    0.03492    0.03042    0.01466         37        640: 1\n",
      "tensor([0.97961], device='cuda:0', grad_fn=<AddBackward0>) tensor(10753.79883, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.926      0.143      0.279      0.135\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/49      3.51G    0.03472    0.02973    0.01492         26        640: 1\n",
      "tensor([0.92581], device='cuda:0', grad_fn=<AddBackward0>) tensor(10163.46094, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.925      0.141      0.275      0.135\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/49      3.51G     0.0341    0.02984    0.01437         25        640: 1\n",
      "tensor([0.88859], device='cuda:0', grad_fn=<AddBackward0>) tensor(10657.98145, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.924      0.149      0.288      0.146\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/49      3.51G    0.03381    0.02881    0.01379         30        640: 1\n",
      "tensor([0.98324], device='cuda:0', grad_fn=<AddBackward0>) tensor(10521.34668, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.933      0.142      0.286       0.14\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/49      3.51G     0.0331    0.02832    0.01344         19        640: 1\n",
      "tensor([0.89450], device='cuda:0', grad_fn=<AddBackward0>) tensor(11590.36621, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.934      0.146      0.308       0.15\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/49      3.51G    0.03301    0.02838    0.01302         14        640: 1\n",
      "tensor([0.88744], device='cuda:0', grad_fn=<AddBackward0>) tensor(11303.13965, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.932      0.144      0.299      0.152\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/49      3.51G    0.03295    0.02858    0.01236         38        640: 1\n",
      "tensor([0.98263], device='cuda:0', grad_fn=<AddBackward0>) tensor(11196.55566, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.891      0.165      0.297       0.15\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/49      3.51G    0.03189    0.02795     0.0124         20        640: 1\n",
      "tensor([0.73333], device='cuda:0', grad_fn=<AddBackward0>) tensor(9425.46484, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.893      0.157      0.295      0.146\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/49      3.51G    0.03183    0.02835    0.01185         30        640: 1\n",
      "tensor([1.20295], device='cuda:0', grad_fn=<AddBackward0>) tensor(12472.09570, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.933      0.143       0.29      0.147\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/49      3.51G    0.03176    0.02797    0.01179         21        640: 1\n",
      "tensor([0.90653], device='cuda:0', grad_fn=<AddBackward0>) tensor(10994.67285, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.89      0.156      0.278      0.142\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/49      3.51G    0.03096    0.02703    0.01174         29        640: 1\n",
      "tensor([0.88457], device='cuda:0', grad_fn=<AddBackward0>) tensor(11132.65723, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.898      0.159      0.305      0.151\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/49      3.51G    0.03077    0.02806    0.01137         28        640: 1\n",
      "tensor([0.86411], device='cuda:0', grad_fn=<AddBackward0>) tensor(11118.63281, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.901      0.158      0.297      0.151\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/49      3.51G    0.03057    0.02721     0.0112         19        640: 1\n",
      "tensor([0.79087], device='cuda:0', grad_fn=<AddBackward0>) tensor(9034.77734, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.892      0.163      0.311      0.151\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/49      3.51G    0.03026    0.02696    0.01103         22        640: 1\n",
      "tensor([0.76057], device='cuda:0', grad_fn=<AddBackward0>) tensor(9840.82422, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.903      0.157      0.314      0.159\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/49      3.51G    0.02974     0.0272    0.01028         32        640: 1\n",
      "tensor([0.92779], device='cuda:0', grad_fn=<AddBackward0>) tensor(10800.89746, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.898      0.163      0.329      0.167\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/49      3.51G    0.02936    0.02698    0.01018         18        640: 1\n",
      "tensor([1.03483], device='cuda:0', grad_fn=<AddBackward0>) tensor(12043.10254, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.895      0.165      0.329      0.169\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/49      3.51G    0.02867    0.02664   0.009861         33        640: 1\n",
      "tensor([0.92176], device='cuda:0', grad_fn=<AddBackward0>) tensor(10738.25977, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.894      0.161       0.34      0.165\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/49      3.51G    0.02832    0.02645   0.009268         38        640: 1\n",
      "tensor([0.84310], device='cuda:0', grad_fn=<AddBackward0>) tensor(9410.24805, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.906      0.158      0.311      0.162\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/49      3.51G    0.02817    0.02594   0.009209         46        640: 1\n",
      "tensor([0.85504], device='cuda:0', grad_fn=<AddBackward0>) tensor(9358.05078, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.895      0.168      0.342      0.168\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/49      3.51G    0.02801    0.02525   0.009268         27        640: 1\n",
      "tensor([0.76027], device='cuda:0', grad_fn=<AddBackward0>) tensor(9234.90625, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.889      0.165      0.315      0.163\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/49      3.51G    0.02728    0.02585   0.008687         18        640: 1\n",
      "tensor([0.78971], device='cuda:0', grad_fn=<AddBackward0>) tensor(9450.75684, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.899      0.162       0.32      0.164\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/49      3.51G    0.02742    0.02514    0.00958         24        640: 1\n",
      "tensor([0.83805], device='cuda:0', grad_fn=<AddBackward0>) tensor(10067.13770, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675        0.9      0.164      0.339       0.17\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/49      3.51G    0.02708    0.02582   0.008751         21        640: 1\n",
      "tensor([0.67919], device='cuda:0', grad_fn=<AddBackward0>) tensor(8196.14551, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.902      0.167      0.337      0.167\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/49      3.51G    0.02673    0.02531    0.00845         28        640: 1\n",
      "tensor([0.77824], device='cuda:0', grad_fn=<AddBackward0>) tensor(9928.18457, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.91      0.158      0.326      0.168\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/49      3.51G    0.02656    0.02495   0.008281         34        640: 1\n",
      "tensor([0.91727], device='cuda:0', grad_fn=<AddBackward0>) tensor(10693.41895, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.911      0.159       0.33      0.165\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/49      3.51G    0.02629    0.02455   0.008501         29        640: 1\n",
      "tensor([0.78023], device='cuda:0', grad_fn=<AddBackward0>) tensor(8949.93359, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.896      0.163      0.328      0.164\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/49      3.51G    0.02566    0.02443   0.008188         23        640: 1\n",
      "tensor([0.79546], device='cuda:0', grad_fn=<AddBackward0>) tensor(11479.78125, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.904      0.166      0.333      0.169\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/49      3.51G    0.02518    0.02421   0.008279         28        640: 1\n",
      "tensor([0.75237], device='cuda:0', grad_fn=<AddBackward0>) tensor(8337.04199, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.896      0.163      0.337      0.173\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/49      3.51G     0.0252    0.02385   0.008116         19        640: 1\n",
      "tensor([0.65296], device='cuda:0', grad_fn=<AddBackward0>) tensor(8336.87402, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.904      0.163      0.336      0.173\n",
      "\n",
      "50 epochs completed in 0.479 hours.\n",
      "Optimizer stripped from runs/train/exp34/weights/last.pt, 14.4MB\n",
      "Optimizer stripped from runs/train/exp34/weights/best.pt, 14.4MB\n",
      "\n",
      "Validating runs/train/exp34/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.903      0.163      0.337      0.173\n",
      "                   car       1048       4012      0.754      0.742      0.777      0.461\n",
      "                   van       1048        431      0.963     0.0255      0.445      0.247\n",
      "                 truck       1048        166          1          0        0.3      0.183\n",
      "                  tram       1048         56          1          0      0.454      0.209\n",
      "                person       1048        618      0.509      0.536      0.486      0.216\n",
      "        person_sitting       1048         20          1          0     0.0313    0.00482\n",
      "               cyclist       1048        234          1          0      0.121     0.0337\n",
      "                  misc       1048        138          1          0     0.0775     0.0296\n",
      "Results saved to \u001b[1mruns/train/exp34\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/9e2aa907f14b472c800c4ee331543c15\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_f1                         : 0.7480146712755236\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_false_positives            : 970.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5                     : 0.7771195162635256\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5:.95                 : 0.4605797778999549\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_precision                  : 0.7542163092425004\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_recall                     : 0.7419141890311715\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_support                    : 4012\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_true_positives             : 2977.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_f1                     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_false_positives        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_mAP@.5                 : 0.12117305402039434\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_mAP@.5:.95             : 0.033715776327974886\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_precision              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_recall                 : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_support                : 234\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_true_positives         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [785]                     : (0.8380496501922607, 5.6137166023254395)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [100]          : (0.05450159591711813, 0.3737988522172412)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [100]     : (0.027584579079518022, 0.17307456872234095)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [100]        : (0.04069370717484862, 0.9341061182658112)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [100]           : (0.10426222273137534, 0.2326015418481141)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_f1                        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_false_positives           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_mAP@.5                    : 0.07754503759780214\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_mAP@.5:.95                : 0.029577635410838837\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_precision                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_recall                    : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_support                   : 138\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_true_positives            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_f1                      : 0.522003913371074\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_false_positives         : 319.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5                  : 0.4862367803346723\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5:.95              : 0.21580992188842404\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_precision               : 0.5090821749961535\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_recall                  : 0.5355987055016181\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_f1              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_false_positives : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_mAP@.5          : 0.03126536078158429\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_mAP@.5:.95      : 0.004822400454257996\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_precision       : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_recall          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_support         : 20\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_true_positives  : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_support                 : 618\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_true_positives          : 331.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [100]           : (0.025182025507092476, 0.0828334391117096)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [100]           : (0.008115709759294987, 0.07221266627311707)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [100]           : (0.023849867284297943, 0.049152672290802)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_f1                        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_false_positives           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_mAP@.5                    : 0.45388621316996514\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_mAP@.5:.95                : 0.2090593642593595\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_precision                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_recall                    : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_support                   : 56\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_true_positives            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_f1                       : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_false_positives          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5                   : 0.29988378802379234\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5:.95               : 0.1831061302969857\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_precision                : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_recall                   : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_support                  : 166\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_true_positives           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [100]             : (0.04538332298398018, 0.08671673387289047)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [100]             : (0.026014866307377815, 0.05662788823246956)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [100]             : (0.06628302484750748, 0.11725270003080368)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_f1                         : 0.049725707898992486\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_false_positives            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5                     : 0.44539191875556366\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5:.95                 : 0.24703071601246446\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_precision                  : 0.9626251934995104\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_recall                     : 0.025522041763341066\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_support                    : 431\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_true_positives             : 11.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [100]                    : (0.0004960000000000005, 0.07019108280254777)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [100]                    : (0.0004960000000000005, 0.009583609341825903)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [100]                    : (0.0004960000000000005, 0.009583609341825903)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/yolov5/9e2aa907f14b472c800c4ee331543c15\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.1625\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/exp34\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (1.80 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     model graph                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Please wait for assets to finish uploading (timeout is 10800 seconds)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m All assets have been sent, waiting for delivery confirmation\n"
     ]
    }
   ],
   "source": [
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_Lwf.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_VOCKITTI.yaml \\\n",
    "--data data/temp_test.yaml \\\n",
    "--epochs 50 \\\n",
    "--weights ./runs/train/fog_02/weights/best.pt \\\n",
    "--Lwf_enable \\\n",
    "--Lwf_temperature 1.0 \\\n",
    "--Lwf_lambda 5e-5 \\\n",
    "\"\"\"\n",
    "!{command}\n",
    "# --weights ./runs/train/exp3/weights/best.pt \\\n",
    "# 1.0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "daaa2517-f028-427a-9438-fe86c3df2eb9",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "db725a18-941d-4aaa-baa5-39a8b34e2300",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "b369f956-5bac-44d7-91bb-2d8026a9357f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/VOCKITTI.yaml, weights=['runs/train/exp34/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 image\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.697      0.606      0.643      0.392\n",
      "                   car       4952       1201       0.83      0.813      0.857      0.614\n",
      "                person       4952       4528      0.819      0.775      0.826      0.498\n",
      "             aeroplane       4952        285      0.759      0.674      0.737      0.419\n",
      "               bicycle       4952        337      0.834      0.686      0.771      0.468\n",
      "                  bird       4952        459      0.609       0.52      0.544      0.304\n",
      "                  boat       4952        263       0.53      0.433      0.418      0.214\n",
      "                bottle       4952        469      0.702      0.537      0.542      0.327\n",
      "                   bus       4952        213      0.742       0.62      0.718       0.53\n",
      "                   cat       4952        358       0.69       0.64      0.654      0.376\n",
      "                 chair       4952        756      0.629      0.386      0.446      0.251\n",
      "                   cow       4952        244      0.648      0.652      0.695      0.445\n",
      "           diningtable       4952        206      0.718      0.495      0.566      0.327\n",
      "                   dog       4952        489      0.622      0.562      0.608      0.357\n",
      "                 horse       4952        348      0.808      0.761      0.804      0.508\n",
      "             motorbike       4952        325      0.724       0.72      0.746      0.437\n",
      "           pottedplant       4952        480      0.576      0.362      0.398      0.186\n",
      "                 sheep       4952        242      0.586      0.644      0.612      0.396\n",
      "                  sofa       4952        239      0.618      0.582      0.572      0.357\n",
      "                 train       4952        282      0.782      0.695       0.73      0.444\n",
      "             tvmonitor       4952        308      0.721      0.554      0.608      0.392\n",
      "Speed: 0.1ms pre-process, 1.4ms inference, 0.9ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp151\u001b[0m\n",
      "Test set val successful|ly!\n"
     ]
    }
   ],
   "source": [
    "# 这是无雾训练集\n",
    "model = f'runs/train/exp34/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/VOCKITTI.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successful|ly!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "# 5e-5 新"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b48f03e7-bb74-4c36-b5a6-51725ac236b4",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "200b7e92-91ae-4810-bb5c-ac50af59d642",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti_increment.yaml, weights=['runs/train/exp34/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198       0.89      0.166      0.348      0.179\n",
      "                   car       2244       8711      0.753      0.748      0.784      0.463\n",
      "                   van       2244        861      0.869     0.0186      0.389      0.227\n",
      "                 truck       2244        333          1          0       0.38       0.22\n",
      "                  tram       2244        138          1          0      0.423      0.189\n",
      "                person       2244       1286      0.496      0.561      0.483      0.216\n",
      "        person_sitting       2244         89          1          0      0.115     0.0406\n",
      "               cyclist       2244        496          1          0      0.114      0.033\n",
      "                  misc       2244        284          1          0     0.0974     0.0394\n",
      "Speed: 0.1ms pre-process, 0.7ms inference, 1.1ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp152\u001b[0m\n",
      "Test set val successful|ly!\n"
     ]
    }
   ],
   "source": [
    "# 这是无雾训练集\n",
    "model = f'runs/train/exp34/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti_increment.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successful|ly!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "#5e-5 旧数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "54965327-e0d3-4a2b-9b96-b0c1a726eebe",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "72380c1c-03cf-4b57-82b8-07ba7d662d6a",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "f7b68315-5239-4bad-b06b-d8f8b9ec7ec9",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'\\n1e-4 : new 0.327 old:0.25\\n5e-5 : new 0.392 old:0.179\\n1e-3 : new 0.0873 old:0.331\\n'"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Lwf_lambda sweep results: mAP on the new (VOC+KITTI) vs old (KITTI) test sets.\n",
     "# The bare string is the cell's displayed output; presumably these are the\n",
     "# 'all' mAP@0.5:0.95 values from the runs above — TODO confirm against run logs.\n",
     "'''\n",
     "1e-4 : new 0.327 old:0.25\n",
     "5e-5 : new 0.392 old:0.179\n",
     "1e-3 : new 0.0873 old:0.331\n",
     "'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f6673a9a-347a-4f57-a0e6-3425e927386d",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "beab1e6d-c992-417d-be9b-2f2557dfd6f0",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "86bf7546-cd41-4ed2-a89a-fe7d6b9c3c36",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_Lwf: \u001b[0mweights=./runs/train/fog_02/weights/best.pt, cfg=models/yolov5s_VOCKITTI.yaml, data=data/temp_test.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=50, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=True, Lwf_lambda=0.0001, Lwf_temperature=5.0\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2882 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/522f9cb48e0b46ad855547466c5e560d\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83607  models.yolo.Detect                      [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7089751 parameters, 7089751 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/349 items from runs/train/fog_02/weights/best.pt\n",
      "Overriding model.yaml nc=26 with nc=8\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo.Detect                      [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/train2007.cache... 2501 ima\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/val.cache... 1048 images, 0 b\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.08 anchors/target, 1.000 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/exp35/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/exp35\u001b[0m\n",
      "Starting training for 50 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/49       3.5G    0.08476    0.04914    0.07201         32        640: 1\n",
      "tensor([2.27408], device='cuda:0', grad_fn=<AddBackward0>) tensor(12533.48145, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675     0.0258      0.258     0.0582     0.0241\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/49       3.5G     0.0654    0.04753    0.06172         14        640: 1\n",
      "tensor([1.39197], device='cuda:0', grad_fn=<AddBackward0>) tensor(8929.24316, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.829     0.0917      0.232     0.0912\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/49       3.5G    0.05977     0.0431    0.05844         33        640: 1\n",
      "tensor([2.05363], device='cuda:0', grad_fn=<AddBackward0>) tensor(11621.11035, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.712      0.276      0.366      0.158\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/49       3.5G    0.05607    0.04134    0.05305         29        640: 1\n",
      "tensor([2.07991], device='cuda:0', grad_fn=<AddBackward0>) tensor(13580.66699, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.507      0.356        0.4       0.18\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/49       3.5G    0.05306    0.04024    0.04985         21        640: 1\n",
      "tensor([2.14227], device='cuda:0', grad_fn=<AddBackward0>) tensor(13960.83203, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.596      0.393      0.457      0.202\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/49       3.5G    0.05175    0.04047    0.04622         21        640: 1\n",
      "tensor([1.99713], device='cuda:0', grad_fn=<AddBackward0>) tensor(12660.20508, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.53      0.402      0.459      0.211\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/49       3.5G    0.04991    0.03972    0.04284         37        640: 1\n",
      "tensor([1.85271], device='cuda:0', grad_fn=<AddBackward0>) tensor(11019.98633, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.57      0.437      0.498      0.239\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/49       3.5G    0.04824    0.03899    0.04054         24        640: 1\n",
      "tensor([2.11485], device='cuda:0', grad_fn=<AddBackward0>) tensor(13767.82617, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.625      0.441      0.516      0.233\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/49       3.5G    0.04761    0.03833    0.03759         13        640: 1\n",
      "tensor([1.92905], device='cuda:0', grad_fn=<AddBackward0>) tensor(12662.10156, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.556      0.403      0.455      0.211\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/49       3.5G     0.0462    0.03778    0.03608         22        640: 1\n",
      "tensor([1.68558], device='cuda:0', grad_fn=<AddBackward0>) tensor(12457.95898, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.599      0.435      0.514      0.232\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/49       3.5G    0.04576    0.03717    0.03446         32        640: 1\n",
      "tensor([1.94091], device='cuda:0', grad_fn=<AddBackward0>) tensor(13282.07227, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.701      0.393      0.529      0.247\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/49       3.5G    0.04482    0.03718    0.03344         24        640: 1\n",
      "tensor([1.90457], device='cuda:0', grad_fn=<AddBackward0>) tensor(12802.77637, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.613      0.419      0.499      0.219\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/49       3.5G    0.04457     0.0365    0.03234         33        640: 1\n",
      "tensor([1.59096], device='cuda:0', grad_fn=<AddBackward0>) tensor(9883.19824, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.643       0.44       0.52      0.228\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/49       3.5G    0.04344     0.0368    0.03034         15        640: 1\n",
      "tensor([1.56890], device='cuda:0', grad_fn=<AddBackward0>) tensor(12056.29590, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.599       0.45      0.518      0.232\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/49       3.5G    0.04241    0.03634    0.02995         15        640: 1\n",
      "tensor([1.65045], device='cuda:0', grad_fn=<AddBackward0>) tensor(11315.64258, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.698      0.427      0.549      0.248\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/49       3.5G    0.04234    0.03626    0.02888         27        640: 1\n",
      "tensor([1.57344], device='cuda:0', grad_fn=<AddBackward0>) tensor(11249.05664, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.594      0.456      0.536      0.257\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/49       3.5G    0.04175    0.03625    0.02837         16        640: 1\n",
      "tensor([1.54713], device='cuda:0', grad_fn=<AddBackward0>) tensor(10170.19922, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.583      0.424      0.485      0.226\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/49       3.5G    0.04164    0.03597    0.02707         31        640: 1\n",
      "tensor([1.67084], device='cuda:0', grad_fn=<AddBackward0>) tensor(11372.26074, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.654       0.46      0.554      0.274\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/49       3.5G    0.04081    0.03576    0.02657         24        640: 1\n",
      "tensor([1.52075], device='cuda:0', grad_fn=<AddBackward0>) tensor(10379.65918, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.723      0.448      0.556      0.261\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/49       3.5G    0.04056    0.03523     0.0257         28        640: 1\n",
      "tensor([2.18738], device='cuda:0', grad_fn=<AddBackward0>) tensor(13070.62695, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.654      0.422      0.529      0.254\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/49       3.5G    0.04055     0.0348    0.02552          9        640: 1\n",
      "tensor([1.65060], device='cuda:0', grad_fn=<AddBackward0>) tensor(9850.77148, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.591      0.435      0.518      0.251\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/49       3.5G    0.03982    0.03509    0.02411         37        640: 1\n",
      "tensor([1.72138], device='cuda:0', grad_fn=<AddBackward0>) tensor(11958.90723, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.647      0.399      0.502      0.233\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/49       3.5G     0.0392    0.03452    0.02501         26        640: 1\n",
      "tensor([1.48344], device='cuda:0', grad_fn=<AddBackward0>) tensor(9209.88867, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.656      0.436      0.546      0.257\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/49       3.5G    0.03856     0.0348     0.0246         25        640: 1\n",
      "tensor([1.50220], device='cuda:0', grad_fn=<AddBackward0>) tensor(10584.60840, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.646      0.451      0.544      0.246\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/49       3.5G    0.03854    0.03352    0.02371         30        640: 1\n",
      "tensor([1.50435], device='cuda:0', grad_fn=<AddBackward0>) tensor(9979.35156, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.674      0.487      0.576       0.28\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/49       3.5G    0.03806    0.03322    0.02294         19        640: 1\n",
      "tensor([1.61823], device='cuda:0', grad_fn=<AddBackward0>) tensor(11801.64844, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.673      0.477      0.566      0.268\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/49       3.5G    0.03785    0.03338    0.02259         14        640: 1\n",
      "tensor([1.47869], device='cuda:0', grad_fn=<AddBackward0>) tensor(10643.37109, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.716      0.473      0.593      0.275\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/49       3.5G    0.03755    0.03376    0.02142         38        640: 1\n",
      "tensor([1.73115], device='cuda:0', grad_fn=<AddBackward0>) tensor(11672.25977, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.689      0.492      0.578      0.281\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/49       3.5G    0.03675    0.03312    0.02197         20        640: 1\n",
      "tensor([1.12595], device='cuda:0', grad_fn=<AddBackward0>) tensor(8106.09619, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.689      0.513      0.573      0.268\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/49       3.5G    0.03712    0.03382    0.02118         30        640: 1\n",
      "tensor([1.93170], device='cuda:0', grad_fn=<AddBackward0>) tensor(12521.68359, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.677      0.492      0.587      0.292\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/49       3.5G    0.03694    0.03317     0.0213         21        640: 1\n",
      "tensor([1.51148], device='cuda:0', grad_fn=<AddBackward0>) tensor(10818.42578, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.646      0.496       0.57      0.272\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/49       3.5G    0.03597    0.03245    0.02101         29        640: 1\n",
      "tensor([1.56646], device='cuda:0', grad_fn=<AddBackward0>) tensor(11222.78125, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.685      0.502      0.583      0.272\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/49       3.5G    0.03596    0.03352    0.02059         28        640: 1\n",
      "tensor([1.44603], device='cuda:0', grad_fn=<AddBackward0>) tensor(10916.44531, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.72      0.475      0.574      0.279\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/49       3.5G    0.03561     0.0327    0.02074         19        640: 1\n",
      "tensor([1.14561], device='cuda:0', grad_fn=<AddBackward0>) tensor(7478.62354, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.703      0.472      0.559      0.271\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/49       3.5G    0.03558    0.03249    0.01974         22        640: 1\n",
      "tensor([1.16778], device='cuda:0', grad_fn=<AddBackward0>) tensor(8519.58008, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.709      0.506      0.589      0.273\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/49       3.5G    0.03497    0.03293    0.01907         32        640: 1\n",
      "tensor([1.50311], device='cuda:0', grad_fn=<AddBackward0>) tensor(10519.74512, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.718      0.506      0.611      0.299\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/49       3.5G    0.03482     0.0328    0.01939         18        640: 1\n",
      "tensor([1.59422], device='cuda:0', grad_fn=<AddBackward0>) tensor(11248.65234, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.653      0.508       0.58      0.284\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/49       3.5G     0.0339    0.03241    0.01912         33        640: 1\n",
      "tensor([1.42556], device='cuda:0', grad_fn=<AddBackward0>) tensor(9028.10156, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.704      0.487        0.6      0.297\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/49       3.5G    0.03371     0.0325     0.0182         38        640: 1\n",
      "tensor([1.24477], device='cuda:0', grad_fn=<AddBackward0>) tensor(7536.05273, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.705      0.494      0.605      0.291\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/49       3.5G     0.0336    0.03222    0.01825         46        640: 1\n",
      "tensor([1.24141], device='cuda:0', grad_fn=<AddBackward0>) tensor(7643.78809, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.696      0.515      0.602      0.296\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/49       3.5G     0.0338    0.03137    0.01812         27        640: 1\n",
      "tensor([1.14128], device='cuda:0', grad_fn=<AddBackward0>) tensor(7717.94189, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.695      0.535      0.617      0.298\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/49       3.5G    0.03298     0.0323    0.01776         18        640: 1\n",
      "tensor([1.18044], device='cuda:0', grad_fn=<AddBackward0>) tensor(7809.91016, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.728      0.505      0.612      0.303\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/49       3.5G    0.03319    0.03132    0.01879         24        640: 1\n",
      "tensor([1.31061], device='cuda:0', grad_fn=<AddBackward0>) tensor(8270.56543, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.729      0.506       0.61      0.297\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/49       3.5G    0.03296    0.03252    0.01813         21        640: 1\n",
      "tensor([1.03556], device='cuda:0', grad_fn=<AddBackward0>) tensor(6347.22852, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.71      0.534      0.619      0.304\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/49       3.5G    0.03256    0.03189    0.01735         28        640: 1\n",
      "tensor([1.19856], device='cuda:0', grad_fn=<AddBackward0>) tensor(8005.94824, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.708       0.51      0.609       0.29\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/49       3.5G    0.03269    0.03175    0.01775         34        640: 1\n",
      "tensor([1.36958], device='cuda:0', grad_fn=<AddBackward0>) tensor(8900.85254, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.725      0.515      0.616      0.301\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/49       3.5G    0.03239    0.03118     0.0179         29        640: 1\n",
      "tensor([1.09992], device='cuda:0', grad_fn=<AddBackward0>) tensor(6111.87549, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.724      0.526      0.636      0.306\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/49       3.5G    0.03182    0.03116    0.01753         23        640: 1\n",
      "tensor([1.36927], device='cuda:0', grad_fn=<AddBackward0>) tensor(10082.10938, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.723      0.525      0.621      0.313\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/49       3.5G    0.03145    0.03098    0.01691         28        640: 1\n",
      "tensor([0.98976], device='cuda:0', grad_fn=<AddBackward0>) tensor(5493.59326, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.71      0.532      0.616       0.31\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/49       3.5G    0.03134    0.03064    0.01715         19        640: 1\n",
      "tensor([0.97846], device='cuda:0', grad_fn=<AddBackward0>) tensor(5989.79639, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.735      0.535       0.63      0.314\n",
      "\n",
      "50 epochs completed in 0.478 hours.\n",
      "Optimizer stripped from runs/train/exp35/weights/last.pt, 14.4MB\n",
      "Optimizer stripped from runs/train/exp35/weights/best.pt, 14.4MB\n",
      "\n",
      "Validating runs/train/exp35/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.728       0.53      0.631      0.314\n",
      "                   car       1048       4012      0.737      0.844      0.858      0.535\n",
      "                   van       1048        431      0.926      0.608      0.805      0.441\n",
      "                 truck       1048        166      0.982      0.674      0.828      0.463\n",
      "                  tram       1048         56      0.748      0.732      0.758      0.332\n",
      "                person       1048        618      0.467      0.623      0.557      0.245\n",
      "        person_sitting       1048         20      0.393       0.25      0.246      0.105\n",
      "               cyclist       1048        234       0.87      0.314      0.548       0.18\n",
      "                  misc       1048        138      0.701      0.196      0.447      0.212\n",
      "Results saved to \u001b[1mruns/train/exp35\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/522f9cb48e0b46ad855547466c5e560d\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_f1                         : 0.7865528832033142\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_false_positives            : 1210.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5                     : 0.8582044338077011\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5:.95                 : 0.5353744108932696\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_precision                  : 0.7366418949370391\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_recall                     : 0.8437188434695913\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_support                    : 4012\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_true_positives             : 3385.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_f1                     : 0.46108709372260454\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_false_positives        : 11.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_mAP@.5                 : 0.5481930055540578\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_mAP@.5:.95             : 0.1797384456084599\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_precision              : 0.869674204712568\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_recall                 : 0.31370389341403837\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_support                : 234\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_true_positives         : 73.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [785]                     : (1.3106135129928589, 10.761479377746582)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [100]          : (0.05822985493728665, 0.6358114713861569)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [100]     : (0.024133421041419675, 0.31418229099275286)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [100]        : (0.025758118150287748, 0.829150430079827)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [100]           : (0.0917004658823703, 0.5353143407667584)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_f1                        : 0.30596225258177284\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_false_positives           : 11.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_mAP@.5                    : 0.446698878262464\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_mAP@.5:.95                : 0.2124972193713352\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_precision                 : 0.7014378974594474\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_recall                    : 0.1956521739130435\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_support                   : 138\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_true_positives            : 27.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_f1                      : 0.5339736845455868\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_false_positives         : 439.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5                  : 0.5566030885485085\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5:.95              : 0.24467727675546605\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_precision               : 0.4672224472812107\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_recall                  : 0.6229773462783171\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_f1              : 0.3055766175011976\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_false_positives : 8.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_mAP@.5          : 0.24595085459046748\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_mAP@.5:.95      : 0.10459555835317924\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_precision       : 0.3929267837718542\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_recall          : 0.25\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_support         : 20\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_true_positives  : 5.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_support                 : 618\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_true_positives          : 385.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [100]           : (0.03134052827954292, 0.0847587063908577)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [100]           : (0.016910826787352562, 0.07200920581817627)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [100]           : (0.03063933551311493, 0.04914494603872299)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_f1                        : 0.7398320596185175\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_false_positives           : 14.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_mAP@.5                    : 0.7575281426649741\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_mAP@.5:.95                : 0.3319772878080134\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_precision                 : 0.7476844853295932\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_recall                    : 0.7321428571428571\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_support                   : 56\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_true_positives            : 41.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_f1                       : 0.7992958221201106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_false_positives          : 2.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5                   : 0.8284714905108579\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5:.95               : 0.4625019010976015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_precision                : 0.9824306478228958\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_recall                   : 0.6737096543811555\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_support                  : 166\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_true_positives           : 112.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [100]             : (0.04033571854233742, 0.09168867021799088)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [100]             : (0.01641971431672573, 0.042703766375780106)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [100]             : (0.06259143352508545, 0.11676973104476929)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_f1                         : 0.7342433052038028\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_false_positives            : 21.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5                     : 0.8050141295370833\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5:.95                 : 0.4410655643076634\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_precision                  : 0.9258466084482462\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_recall                     : 0.6083464384269625\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_support                    : 431\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_true_positives             : 262.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [100]                    : (0.0004960000000000005, 0.07019108280254777)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [100]                    : (0.0004960000000000005, 0.009583609341825903)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [100]                    : (0.0004960000000000005, 0.009583609341825903)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/yolov5/522f9cb48e0b46ad855547466c5e560d\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.1625\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/exp35\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (2.05 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     model graph                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Please wait for assets to finish uploading (timeout is 10800 seconds)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Still uploading 2 file(s), remaining 77.31 KB/534.32 KB\n"
     ]
    }
   ],
   "source": [
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_Lwf.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_VOCKITTI.yaml \\\n",
    "--data data/temp_test.yaml \\\n",
    "--epochs 50 \\\n",
    "--weights ./runs/train/fog_02/weights/best.pt \\\n",
    "--Lwf_enable \\\n",
    "--Lwf_temperature 5.0 \\\n",
    "--Lwf_lambda 1e-4 \\\n",
    "\"\"\"\n",
    "!{command}\n",
    "# --weights ./runs/train/exp3/weights/best.pt \\\n",
    "# 1.0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6d9adbe2-d3ab-45da-ad44-a285c79da653",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "4e685ed3-8367-44b1-bfad-fdfe7621a8a3",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/VOCKITTI.yaml, weights=['runs/train/exp35/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 image\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.548      0.466      0.463      0.247\n",
      "                   car       4952       1201      0.772      0.774        0.8      0.532\n",
      "                person       4952       4528      0.727       0.69      0.721      0.379\n",
      "             aeroplane       4952        285      0.655       0.53      0.547      0.264\n",
      "               bicycle       4952        337       0.68      0.542        0.6      0.334\n",
      "                  bird       4952        459      0.431      0.301      0.305      0.141\n",
      "                  boat       4952        263      0.325      0.281      0.239      0.104\n",
      "                bottle       4952        469       0.49      0.399      0.349       0.18\n",
      "                   bus       4952        213       0.57      0.502      0.553      0.385\n",
      "                   cat       4952        358      0.551      0.402      0.393      0.186\n",
      "                 chair       4952        756      0.554      0.288      0.319      0.157\n",
      "                   cow       4952        244      0.489      0.533      0.472      0.268\n",
      "           diningtable       4952        206      0.543       0.33       0.36      0.142\n",
      "                   dog       4952        489       0.43      0.276      0.313      0.151\n",
      "                 horse       4952        348      0.649      0.672       0.65       0.33\n",
      "             motorbike       4952        325       0.59      0.628      0.598       0.31\n",
      "           pottedplant       4952        480      0.442      0.265      0.263      0.113\n",
      "                 sheep       4952        242      0.471      0.541      0.477      0.285\n",
      "                  sofa       4952        239       0.47       0.31      0.302      0.156\n",
      "                 train       4952        282      0.582      0.582      0.539      0.271\n",
      "             tvmonitor       4952        308      0.537      0.464      0.451      0.244\n",
      "Speed: 0.1ms pre-process, 1.4ms inference, 1.1ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp153\u001b[0m\n",
      "Test set val successful|ly!\n"
     ]
    }
   ],
   "source": [
    "# 这是无雾训练集\n",
    "model = f'runs/train/exp35/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/VOCKITTI.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successful|ly!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "# 5.0 新"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e690b855-cfac-4c19-a965-b554146617bf",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "23d5ae1a-1427-4aea-bb8e-114ab78092b9",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti_increment.yaml, weights=['runs/train/exp35/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.752      0.533      0.645      0.319\n",
      "                   car       2244       8711      0.727      0.851      0.861      0.537\n",
      "                   van       2244        861      0.848      0.542       0.74       0.42\n",
      "                 truck       2244        333       0.98      0.723      0.864      0.513\n",
      "                  tram       2244        138      0.799      0.667      0.758      0.313\n",
      "                person       2244       1286      0.435      0.632      0.561      0.254\n",
      "        person_sitting       2244         89      0.539      0.341      0.311       0.11\n",
      "               cyclist       2244        496      0.829      0.292      0.537      0.177\n",
      "                  misc       2244        284      0.858      0.213       0.53      0.227\n",
      "Speed: 0.1ms pre-process, 0.9ms inference, 1.4ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp154\u001b[0m\n",
      "Test set val successful|ly!\n"
     ]
    }
   ],
   "source": [
    "# 这是无雾训练集\n",
    "model = f'runs/train/exp35/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti_increment.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successful|ly!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "#5.0 旧数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "dd914c42-0199-4fd8-ae27-3531d7adae16",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "181246b5-5f4c-4979-a957-285bb2f42a52",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9fb0e92e-d0ef-42c0-9a4f-ae6992854c42",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6fbf3d40-2063-44bd-a2ba-c89cd7fd0b2d",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "9cab97f2-ed2c-4b92-8695-81beff240633",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti_increment.yaml, weights=['runs/train/exp88/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 a471430d Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 160 layers, 7080253 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.818      0.763      0.824      0.533\n",
      "                   car       2244       8711      0.898      0.882      0.943      0.703\n",
      "                   van       2244        861      0.857      0.828       0.89      0.635\n",
      "                 truck       2244        333      0.886      0.928      0.956      0.725\n",
      "                  tram       2244        138      0.831      0.884      0.922       0.59\n",
      "                person       2244       1286      0.849      0.635      0.748      0.388\n",
      "        person_sitting       2244         89      0.547      0.494      0.492      0.262\n",
      "               cyclist       2244        496      0.832      0.724      0.825      0.453\n",
      "                  misc       2244        284      0.841      0.729      0.814      0.507\n",
      "Speed: 0.0ms pre-process, 0.7ms inference, 1.5ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp181\u001b[0m\n",
      "Test set val successful|ly!\n"
     ]
    }
   ],
   "source": [
    "model = f'runs/train/exp88/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti_increment.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successful|ly!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "459eae26-a3d5-4ed9-976c-a926bc7e51a0",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
