{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 47,
   "id": "8c2fadd5-10a5-4403-bce5-b45533900cfe",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_Lwf: \u001b[0mweights=./runs/train/fog_02/weights/best.pt, cfg=models/yolov5s_VOCKITTI.yaml, data=data/temp_test.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=50, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=False\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2882 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/729e93e42cb04383b398c13f447a1ee5\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83607  models.yolo.Detect                      [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7089751 parameters, 7089751 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/349 items from runs/train/fog_02/weights/best.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/train2007.cache... 2501 ima\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/val.cache... 1048 images, 0 b\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.08 anchors/target, 1.000 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/exp23/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/exp23\u001b[0m\n",
      "Starting training for 50 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/49      3.64G    0.08259    0.04901     0.0735         32        640: 1\n",
      "tensor([0.94887], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675     0.0467       0.17     0.0459      0.026\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/49      3.64G    0.05992    0.04252      0.058         14        640: 1\n",
      "tensor([0.50952], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.86     0.0884     0.0941     0.0473\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/49      3.64G    0.05519    0.03835    0.05202         33        640: 1\n",
      "tensor([0.81542], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.866       0.11      0.115     0.0423\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/49      3.64G    0.05074    0.03746    0.04517         29        640: 1\n",
      "tensor([0.59075], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.915      0.107      0.143       0.06\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/49      3.64G    0.04829    0.03684    0.03938         21        640: 1\n",
      "tensor([0.68097], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.917       0.11      0.148     0.0724\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/49      3.64G    0.04674    0.03732    0.03361         21        640: 1\n",
      "tensor([0.63382], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.903      0.122      0.151     0.0719\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/49      3.64G    0.04518    0.03663     0.0308         37        640: 1\n",
      "tensor([0.67875], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.914      0.133      0.164     0.0787\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/49      3.64G    0.04429    0.03582    0.02785         24        640: 1\n",
      "tensor([0.62663], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.924      0.116      0.152     0.0746\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/49      3.64G    0.04367    0.03508    0.02501         13        640: 1\n",
      "tensor([0.54582], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.91      0.128      0.161     0.0808\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/49      3.64G    0.04207    0.03479    0.02424         22        640: 1\n",
      "tensor([0.35977], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.907      0.114      0.144     0.0706\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/49      3.64G    0.04189     0.0339    0.02277         32        640: 1\n",
      "tensor([0.56312], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.93      0.123      0.161     0.0818\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/49      3.64G    0.04112    0.03396    0.02172         24        640: 1\n",
      "tensor([0.49105], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.915      0.132      0.161     0.0827\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/49      3.64G     0.0405    0.03332    0.02058         33        640: 1\n",
      "tensor([0.55386], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.917      0.133      0.165     0.0827\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/49      3.64G    0.03962    0.03352    0.01832         15        640: 1\n",
      "tensor([0.30479], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.921      0.133      0.166      0.083\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/49      3.64G    0.03877    0.03299    0.01854         15        640: 1\n",
      "tensor([0.41717], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.917      0.129      0.159     0.0796\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/49      3.64G    0.03856    0.03245    0.01753         27        640: 1\n",
      "tensor([0.41606], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.918      0.136      0.166      0.082\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/49      3.64G    0.03791    0.03256    0.01662         16        640: 1\n",
      "tensor([0.39863], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.915      0.126      0.159     0.0798\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/49      3.64G      0.038    0.03219    0.01548         31        640: 1\n",
      "tensor([0.49255], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.916      0.131      0.157     0.0785\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/49      3.64G    0.03728    0.03199     0.0158         24        640: 1\n",
      "tensor([0.38793], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.922      0.133      0.158      0.077\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/49      3.64G    0.03677    0.03143     0.0142         28        640: 1\n",
      "tensor([0.53301], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.919      0.127      0.161     0.0797\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/49      3.64G    0.03638    0.03095    0.01381          9        640: 1\n",
      "tensor([0.49340], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.928      0.126      0.162     0.0837\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/49      3.64G    0.03541    0.03077    0.01315         37        640: 1\n",
      "tensor([0.43325], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.927      0.128      0.165     0.0842\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/49      3.64G    0.03534    0.03017    0.01337         26        640: 1\n",
      "tensor([0.41082], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.919      0.139      0.162     0.0828\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/49      3.64G    0.03434    0.03038    0.01279         25        640: 1\n",
      "tensor([0.35966], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.925      0.134      0.162     0.0814\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/49      3.64G    0.03442    0.02919    0.01225         30        640: 1\n",
      "tensor([0.43795], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.926      0.133      0.161     0.0832\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/49      3.64G    0.03363    0.02888     0.0118         19        640: 1\n",
      "tensor([0.29607], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.922      0.136      0.163     0.0831\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/49      3.64G    0.03363    0.02893     0.0118         14        640: 1\n",
      "tensor([0.25545], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.923      0.131      0.162     0.0831\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/49      3.64G     0.0335    0.02914    0.01093         38        640: 1\n",
      "tensor([0.40786], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.923      0.135      0.166     0.0867\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/49      3.64G    0.03225    0.02845    0.01081         20        640: 1\n",
      "tensor([0.25919], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.919      0.136      0.173     0.0903\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/49      3.64G    0.03238    0.02878    0.01046         30        640: 1\n",
      "tensor([0.59378], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.928      0.132      0.166     0.0843\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/49      3.64G    0.03213    0.02817    0.01037         21        640: 1\n",
      "tensor([0.33865], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.917      0.135      0.172     0.0909\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/49      3.64G    0.03124     0.0273   0.009831         29        640: 1\n",
      "tensor([0.35071], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.924      0.133      0.174     0.0919\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/49      3.64G    0.03134    0.02831   0.009577         28        640: 1\n",
      "tensor([0.31308], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.92       0.14      0.171     0.0889\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/49      3.64G    0.03087    0.02745   0.009556         19        640: 1\n",
      "tensor([0.37006], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.93      0.135      0.168     0.0876\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/49      3.64G    0.03066    0.02718   0.009226         22        640: 1\n",
      "tensor([0.26367], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.918      0.137      0.164     0.0841\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/49      3.64G    0.03023    0.02736     0.0087         32        640: 1\n",
      "tensor([0.35594], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.925      0.137       0.17     0.0908\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/49      3.64G     0.0295    0.02705   0.008638         18        640: 1\n",
      "tensor([0.37504], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.934      0.131       0.17      0.091\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/49      3.64G    0.02902    0.02667   0.008211         33        640: 1\n",
      "tensor([0.40501], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.933      0.133      0.172     0.0936\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/49      3.64G    0.02857    0.02675   0.007605         38        640: 1\n",
      "tensor([0.36204], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.923      0.137      0.181     0.0957\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/49      3.64G    0.02839    0.02601   0.007648         46        640: 1\n",
      "tensor([0.38047], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.931      0.135      0.179     0.0961\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/49      3.64G    0.02837    0.02538   0.007473         27        640: 1\n",
      "tensor([0.30094], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.936      0.129      0.178     0.0943\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/49      3.64G    0.02751    0.02591   0.007251         18        640: 1\n",
      "tensor([0.31138], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.926       0.14      0.187      0.101\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/49      3.64G     0.0277    0.02518   0.007861         24        640: 1\n",
      "tensor([0.33605], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.923       0.14      0.182     0.0949\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/49      3.64G    0.02721     0.0258    0.00703         21        640: 1\n",
      "tensor([0.26700], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.937      0.132      0.175     0.0932\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/49      3.64G    0.02684    0.02528   0.006616         28        640: 1\n",
      "tensor([0.26196], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.924      0.138      0.172     0.0909\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/49      3.64G    0.02655    0.02485   0.006617         34        640: 1\n",
      "tensor([0.31819], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.926      0.138      0.172     0.0921\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/49      3.64G    0.02632    0.02448   0.006825         29        640: 1\n",
      "tensor([0.32284], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.925      0.139      0.172     0.0911\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/49      3.64G    0.02576     0.0244   0.006635         23        640: 1\n",
      "tensor([0.21677], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.924      0.143      0.173     0.0928\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/49      3.64G    0.02514     0.0241   0.006526         28        640: 1\n",
      "tensor([0.28142], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.926      0.141      0.172     0.0921\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/49      3.64G    0.02513    0.02366   0.006286         19        640: 1\n",
      "tensor([0.21216], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.926       0.14      0.171     0.0923\n",
      "\n",
      "50 epochs completed in 0.418 hours.\n",
      "Optimizer stripped from runs/train/exp23/weights/last.pt, 14.4MB\n",
      "Optimizer stripped from runs/train/exp23/weights/best.pt, 14.4MB\n",
      "\n",
      "Validating runs/train/exp23/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.926      0.139      0.187      0.101\n",
      "                   car       1048       4012      0.819      0.635      0.734      0.421\n",
      "                   van       1048        431          1          0      0.121     0.0777\n",
      "                 truck       1048        166          1          0      0.157      0.094\n",
      "                  tram       1048         56          1          0    0.00883    0.00491\n",
      "                person       1048        618      0.589      0.479      0.465      0.203\n",
      "        person_sitting       1048         20          1          0          0          0\n",
      "               cyclist       1048        234          1          0    0.00313   0.000853\n",
      "                  misc       1048        138          1          0    0.00841    0.00434\n",
      "Results saved to \u001b[1mruns/train/exp23\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/729e93e42cb04383b398c13f447a1ee5\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_f1                         : 0.7157591217382214\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_false_positives            : 562.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5                     : 0.7337226891727302\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5:.95                 : 0.4211696558483373\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_precision                  : 0.8193666125158078\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_recall                     : 0.635412331573847\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_support                    : 4012\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_true_positives             : 2549.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_f1                     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_false_positives        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_mAP@.5                 : 0.0031325868178485707\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_mAP@.5:.95             : 0.000853481825575389\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_precision              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_recall                 : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_support                : 234\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_true_positives         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [785]                     : (0.31307700276374817, 3.9002840518951416)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [100]          : (0.045889829933697945, 0.18739484427936598)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [100]     : (0.026046077965318333, 0.10068289057461453)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [100]        : (0.0466699110727597, 0.9371392035433719)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [100]           : (0.08843938563668938, 0.16970682499222448)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_f1                        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_false_positives           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_mAP@.5                    : 0.00841401933294938\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_mAP@.5:.95                : 0.004340753481132668\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_precision                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_recall                    : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_support                   : 138\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_true_positives            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_f1                      : 0.5283928235753772\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_false_positives         : 206.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5                  : 0.4654057340205783\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5:.95              : 0.20276178310367005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_precision               : 0.5891970981645418\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_recall                  : 0.47896440129449835\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_f1              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_false_positives : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_mAP@.5          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_mAP@.5:.95      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_precision       : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_recall          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_support         : 20\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_true_positives  : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_support                 : 618\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_true_positives          : 296.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [100]           : (0.025132572278380394, 0.08259491622447968)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [100]           : (0.006285997107625008, 0.07350000739097595)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [100]           : (0.023663561791181564, 0.04901263862848282)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_f1                        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_false_positives           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_mAP@.5                    : 0.008827173669428165\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_mAP@.5:.95                : 0.004907534281170521\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_precision                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_recall                    : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_support                   : 56\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_true_positives            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_f1                       : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_false_positives          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5                   : 0.15706712429257608\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5:.95               : 0.09396945694747019\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_precision                : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_recall                   : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_support                  : 166\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_true_positives           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [100]             : (0.04676824063062668, 0.08575539290904999)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [100]             : (0.032826729118824005, 0.07063721865415573)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [100]             : (0.07021632045507431, 0.11601188033819199)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_f1                         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_false_positives            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5                     : 0.12119130804925765\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5:.95                 : 0.077652952510251\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_precision                  : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_recall                     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_support                    : 431\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_true_positives             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [100]                    : (0.0004960000000000005, 0.07019108280254777)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [100]                    : (0.0004960000000000005, 0.009583609341825903)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [100]                    : (0.0004960000000000005, 0.009583609341825903)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/yolov5/729e93e42cb04383b398c13f447a1ee5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Lwf_enable          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.1625\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/exp23\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (1.70 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     model graph                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n"
     ]
    }
   ],
   "source": [
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_Lwf.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_VOCKITTI.yaml \\\n",
    "--data data/temp_test.yaml \\\n",
    "--epochs 50 \\\n",
    "--weights ./runs/train/fog_02/weights/best.pt \\\n",
    "\"\"\"\n",
    "!{command}\n",
    "# --weights ./runs/train/exp3/weights/best.pt \\\n",
    "# 1.0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c1ebc322-37d9-4b81-aae6-4203167ddbfc",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "1c796465-f15a-42d3-8586-7da0288bf311",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_Lwf: \u001b[0mweights=./runs/train/fog_02/weights/best.pt, cfg=models/yolov5s_VOCKITTI.yaml, data=data/temp_test.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=50, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=True, Lwf_lambda=0.001, Lwf_temperature=1.0\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2882 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/37bebd60c0f04062991416a3b79f08a2\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83607  models.yolo.Detect                      [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7089751 parameters, 7089751 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/349 items from runs/train/fog_02/weights/best.pt\n",
      "Overriding model.yaml nc=26 with nc=8\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo.Detect                      [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/train2007.cache... 2501 ima\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/val.cache... 1048 images, 0 b\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.08 anchors/target, 1.000 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/exp29/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/exp29\u001b[0m\n",
      "Starting training for 50 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/49       3.5G    0.09392    0.04742    0.07541         32        640: 1\n",
      "tensor([5.52476], device='cuda:0', grad_fn=<AddBackward0>) tensor(4396.16357, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675     0.0119      0.323     0.0333     0.0061\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/49       3.5G    0.07908    0.05037    0.07128         14        640: 1\n",
      "tensor([5.53693], device='cuda:0', grad_fn=<AddBackward0>) tensor(4950.83350, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675     0.0127      0.462     0.0662      0.022\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/49       3.5G    0.07271    0.04962    0.06976         33        640: 1\n",
      "tensor([5.78863], device='cuda:0', grad_fn=<AddBackward0>) tensor(4747.39648, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.588     0.0617      0.105     0.0399\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/49       3.5G    0.06684    0.04872    0.06539         29        640: 1\n",
      "tensor([7.08181], device='cuda:0', grad_fn=<AddBackward0>) tensor(6258.43457, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.512      0.112      0.195     0.0814\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/49       3.5G    0.06293    0.04661    0.06433         21        640: 1\n",
      "tensor([7.37478], device='cuda:0', grad_fn=<AddBackward0>) tensor(6503.32715, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.575       0.22      0.278      0.116\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/49       3.5G    0.05984    0.04648    0.06274         21        640: 1\n",
      "tensor([5.79546], device='cuda:0', grad_fn=<AddBackward0>) tensor(4920.47363, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.576      0.331      0.367      0.158\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/49       3.5G    0.05794    0.04525    0.06083         37        640: 1\n",
      "tensor([5.22199], device='cuda:0', grad_fn=<AddBackward0>) tensor(4356.71729, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.613      0.377       0.42      0.196\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/49       3.5G    0.05653    0.04439    0.05989         24        640: 1\n",
      "tensor([5.48556], device='cuda:0', grad_fn=<AddBackward0>) tensor(4601.71436, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.602      0.408      0.446      0.203\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/49       3.5G    0.05486    0.04371    0.05809         13        640: 1\n",
      "tensor([5.79706], device='cuda:0', grad_fn=<AddBackward0>) tensor(5034.23047, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.614      0.414      0.473      0.217\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/49       3.5G    0.05422    0.04318    0.05827         22        640: 1\n",
      "tensor([4.76677], device='cuda:0', grad_fn=<AddBackward0>) tensor(4234.63867, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.628      0.425       0.51      0.244\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/49       3.5G     0.0535    0.04283    0.05706         32        640: 1\n",
      "tensor([4.91974], device='cuda:0', grad_fn=<AddBackward0>) tensor(4173.95312, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.656      0.449      0.531      0.251\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/49       3.5G    0.05303    0.04292    0.05711         24        640: 1\n",
      "tensor([5.77245], device='cuda:0', grad_fn=<AddBackward0>) tensor(4967.66406, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.646      0.446      0.522      0.256\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/49       3.5G     0.0528    0.04232    0.05697         33        640: 1\n",
      "tensor([4.19044], device='cuda:0', grad_fn=<AddBackward0>) tensor(3388.39844, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.634      0.478       0.55      0.271\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/49       3.5G    0.05192    0.04291    0.05464         15        640: 1\n",
      "tensor([5.14972], device='cuda:0', grad_fn=<AddBackward0>) tensor(4634.02051, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.613      0.475      0.537      0.263\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/49       3.5G    0.05107    0.04237    0.05492         15        640: 1\n",
      "tensor([5.20680], device='cuda:0', grad_fn=<AddBackward0>) tensor(4512.20264, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.659      0.481      0.574      0.281\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/49       3.5G    0.05078     0.0429    0.05486         27        640: 1\n",
      "tensor([4.71241], device='cuda:0', grad_fn=<AddBackward0>) tensor(4112.06641, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.659      0.496      0.576       0.29\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/49       3.5G    0.05093    0.04272    0.05466         16        640: 1\n",
      "tensor([4.19155], device='cuda:0', grad_fn=<AddBackward0>) tensor(3430.32764, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.658      0.498      0.576       0.28\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/49       3.5G    0.05059    0.04259    0.05343         31        640: 1\n",
      "tensor([4.37073], device='cuda:0', grad_fn=<AddBackward0>) tensor(3566.39062, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.668      0.496      0.578      0.284\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/49       3.5G    0.05059    0.04255    0.05279         24        640: 1\n",
      "tensor([3.86371], device='cuda:0', grad_fn=<AddBackward0>) tensor(3261.92725, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.675      0.518      0.607      0.292\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/49       3.5G    0.04992    0.04261    0.05257         28        640: 1\n",
      "tensor([4.81364], device='cuda:0', grad_fn=<AddBackward0>) tensor(3862.84570, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.656      0.504      0.595      0.297\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/49       3.5G    0.05004    0.04196    0.05239          9        640: 1\n",
      "tensor([3.95749], device='cuda:0', grad_fn=<AddBackward0>) tensor(3211.59619, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.691      0.496      0.603      0.293\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/49       3.5G     0.0496    0.04242    0.05131         37        640: 1\n",
      "tensor([4.26487], device='cuda:0', grad_fn=<AddBackward0>) tensor(3467.16846, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.654      0.518      0.585      0.287\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/49       3.5G    0.04912      0.042    0.05211         26        640: 1\n",
      "tensor([3.62108], device='cuda:0', grad_fn=<AddBackward0>) tensor(2848.90039, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.656      0.541      0.607        0.3\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/49       3.5G    0.04911    0.04227    0.05202         25        640: 1\n",
      "tensor([3.50759], device='cuda:0', grad_fn=<AddBackward0>) tensor(2877.38867, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.68      0.546      0.618      0.312\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/49       3.5G    0.04877    0.04109    0.05127         30        640: 1\n",
      "tensor([3.52865], device='cuda:0', grad_fn=<AddBackward0>) tensor(2834.33203, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.65      0.522      0.609      0.299\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/49       3.5G    0.04863    0.04084    0.05086         19        640: 1\n",
      "tensor([4.02274], device='cuda:0', grad_fn=<AddBackward0>) tensor(3413.68652, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.655      0.562      0.607      0.287\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/49       3.5G    0.04869     0.0414    0.05096         14        640: 1\n",
      "tensor([3.53345], device='cuda:0', grad_fn=<AddBackward0>) tensor(2964.20117, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.672       0.56      0.617      0.311\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/49       3.5G    0.04863    0.04173    0.04986         38        640: 1\n",
      "tensor([3.62547], device='cuda:0', grad_fn=<AddBackward0>) tensor(2789.37036, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.694       0.55      0.637      0.315\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/49       3.5G    0.04822    0.04138     0.0508         20        640: 1\n",
      "tensor([2.97333], device='cuda:0', grad_fn=<AddBackward0>) tensor(2323.86035, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.664      0.554      0.627      0.307\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/49       3.5G    0.04864    0.04221    0.04948         30        640: 1\n",
      "tensor([4.13365], device='cuda:0', grad_fn=<AddBackward0>) tensor(3269.28125, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.676      0.564      0.641      0.316\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/49       3.5G    0.04873    0.04163    0.04987         21        640: 1\n",
      "tensor([3.57699], device='cuda:0', grad_fn=<AddBackward0>) tensor(2866.06226, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.675      0.555      0.623      0.304\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/49       3.5G    0.04798    0.04068    0.04943         29        640: 1\n",
      "tensor([3.85483], device='cuda:0', grad_fn=<AddBackward0>) tensor(3211.57251, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.682      0.559       0.63      0.308\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/49       3.5G     0.0484    0.04215    0.04918         28        640: 1\n",
      "tensor([2.92633], device='cuda:0', grad_fn=<AddBackward0>) tensor(2383.48047, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.682      0.589      0.636      0.311\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/49       3.5G    0.04753     0.0414    0.04999         19        640: 1\n",
      "tensor([2.41083], device='cuda:0', grad_fn=<AddBackward0>) tensor(1851.17871, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.683       0.57      0.642      0.309\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/49       3.5G    0.04847    0.04137    0.04957         22        640: 1\n",
      "tensor([2.50400], device='cuda:0', grad_fn=<AddBackward0>) tensor(1925.67053, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.687      0.567       0.64      0.308\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/49       3.5G    0.04796    0.04217    0.04814         32        640: 1\n",
      "tensor([2.89948], device='cuda:0', grad_fn=<AddBackward0>) tensor(2191.67407, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675        0.7      0.564      0.645       0.31\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/49       3.5G    0.04738    0.04176    0.04806         18        640: 1\n",
      "tensor([3.07332], device='cuda:0', grad_fn=<AddBackward0>) tensor(2368.66602, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.685      0.584      0.634      0.311\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/49       3.5G    0.04732    0.04168    0.04855         33        640: 1\n",
      "tensor([2.48625], device='cuda:0', grad_fn=<AddBackward0>) tensor(1771.69409, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.66      0.582      0.631      0.304\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/49       3.5G    0.04743    0.04204    0.04813         38        640: 1\n",
      "tensor([2.23547], device='cuda:0', grad_fn=<AddBackward0>) tensor(1520.33142, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.675      0.597      0.644      0.311\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/49       3.5G    0.04711    0.04175    0.04813         46        640: 1\n",
      "tensor([2.14274], device='cuda:0', grad_fn=<AddBackward0>) tensor(1391.63843, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.701       0.59      0.657      0.315\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/49       3.5G    0.04765    0.04076    0.04812         27        640: 1\n",
      "tensor([2.17273], device='cuda:0', grad_fn=<AddBackward0>) tensor(1621.68921, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.683      0.607      0.655      0.318\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/49       3.5G    0.04728    0.04223    0.04815         18        640: 1\n",
      "tensor([2.01907], device='cuda:0', grad_fn=<AddBackward0>) tensor(1402.83643, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.69      0.579      0.653      0.319\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/49       3.5G    0.04711    0.04045    0.04856         24        640: 1\n",
      "tensor([2.13771], device='cuda:0', grad_fn=<AddBackward0>) tensor(1345.90393, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.676        0.6      0.645      0.301\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/49       3.5G    0.04743    0.04232    0.04779         21        640: 1\n",
      "tensor([1.70031], device='cuda:0', grad_fn=<AddBackward0>) tensor(1103.87451, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.675      0.604      0.653      0.318\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/49       3.5G    0.04712    0.04204    0.04796         28        640: 1\n",
      "tensor([1.75796], device='cuda:0', grad_fn=<AddBackward0>) tensor(1174.96045, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.699      0.599      0.656      0.313\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/49       3.5G    0.04755    0.04162    0.04788         34        640: 1\n",
      "tensor([1.97339], device='cuda:0', grad_fn=<AddBackward0>) tensor(1293.76965, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.708      0.604      0.651      0.308\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/49       3.5G    0.04737    0.04116    0.04824         29        640: 1\n",
      "tensor([1.83161], device='cuda:0', grad_fn=<AddBackward0>) tensor(1045.20056, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.689      0.597      0.646       0.31\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/49       3.5G    0.04686    0.04133    0.04831         23        640: 1\n",
      "tensor([1.97632], device='cuda:0', grad_fn=<AddBackward0>) tensor(1253.89587, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.704      0.595       0.66      0.318\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/49       3.5G    0.04676    0.04133    0.04732         28        640: 1\n",
      "tensor([1.49580], device='cuda:0', grad_fn=<AddBackward0>) tensor(893.71533, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.695      0.601       0.66      0.314\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/49       3.5G    0.04682    0.04092    0.04835         19        640: 1\n",
      "tensor([1.46875], device='cuda:0', grad_fn=<AddBackward0>) tensor(902.51508, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.705      0.603      0.663      0.318\n",
      "\n",
      "50 epochs completed in 0.495 hours.\n",
      "Optimizer stripped from runs/train/exp29/weights/last.pt, 14.4MB\n",
      "Optimizer stripped from runs/train/exp29/weights/best.pt, 14.4MB\n",
      "\n",
      "Validating runs/train/exp29/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.704      0.595       0.66      0.319\n",
      "                   car       1048       4012      0.701      0.798      0.812      0.498\n",
      "                   van       1048        431      0.829      0.675      0.768      0.453\n",
      "                 truck       1048        166      0.885      0.648       0.81      0.402\n",
      "                  tram       1048         56      0.816      0.768       0.79      0.364\n",
      "                person       1048        618      0.442      0.625      0.559      0.253\n",
      "        person_sitting       1048         20      0.444       0.45      0.418      0.107\n",
      "               cyclist       1048        234      0.719       0.46      0.542      0.208\n",
      "                  misc       1048        138      0.799      0.341      0.584      0.263\n",
      "Results saved to \u001b[1mruns/train/exp29\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/37bebd60c0f04062991416a3b79f08a2\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_f1                         : 0.7462779145313028\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_false_positives            : 1367.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5                     : 0.8117369033545638\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5:.95                 : 0.49785056360421615\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_precision                  : 0.7007709176378605\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_recall                     : 0.7981056829511466\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_support                    : 4012\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_true_positives             : 3202.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_f1                     : 0.5608962727420107\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_false_positives        : 42.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_mAP@.5                 : 0.5418335000089671\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_mAP@.5:.95             : 0.20842724106259874\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_precision              : 0.719196310404364\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_recall                 : 0.4597107791552236\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_support                : 234\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_true_positives         : 108.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [785]                     : (2.137707233428955, 38.1689338684082)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [100]          : (0.03331477392790953, 0.6632763254941199)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [100]     : (0.0061022063125859855, 0.3190898113347578)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [100]        : (0.01194191870925082, 0.7081823992262173)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [100]           : (0.061695254779626646, 0.6073759754872836)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_f1                        : 0.4776256411152124\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_false_positives           : 12.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_mAP@.5                    : 0.5836493068237008\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_mAP@.5:.95                : 0.2631001877001569\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_precision                 : 0.7992265611558521\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_recall                    : 0.34057971014492755\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_support                   : 138\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_true_positives            : 47.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_f1                      : 0.517533822903544\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_false_positives         : 488.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5                  : 0.5589372664421344\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5:.95              : 0.2533495443655185\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_precision               : 0.4418043289356864\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_recall                  : 0.6245954692556634\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_f1              : 0.44716142572929457\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_false_positives : 11.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_mAP@.5          : 0.41761322066288115\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_mAP@.5:.95      : 0.10668822181988087\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_precision       : 0.4443584381084381\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_recall          : 0.45\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_support         : 20\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_true_positives  : 9.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_support                 : 618\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_true_positives          : 386.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [100]           : (0.04676388204097748, 0.09391898661851883)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [100]           : (0.047316208481788635, 0.0754123330116272)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [100]           : (0.0404469333589077, 0.05037279799580574)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_f1                        : 0.7913731256336164\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_false_positives           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_mAP@.5                    : 0.7903833288001302\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_mAP@.5:.95                : 0.36390283319378586\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_precision                 : 0.8163749900880113\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_recall                    : 0.7678571428571429\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_support                   : 56\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_true_positives            : 43.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_f1                       : 0.7481014575269616\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_false_positives          : 14.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5                   : 0.8104235508270613\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5:.95               : 0.4015232356040892\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_precision                : 0.884831835403122\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_recall                   : 0.6479722694582133\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_support                  : 166\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_true_positives           : 108.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [100]             : (0.04162399470806122, 0.10341183841228485)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [100]             : (0.013617179356515408, 0.04283810779452324)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [100]             : (0.062225341796875, 0.11798789352178574)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_f1                         : 0.7442003975124206\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_false_positives            : 60.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5                     : 0.7680002230985445\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5:.95                 : 0.4534438269638513\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_precision                  : 0.8289478411006189\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_recall                     : 0.6751740139211136\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_support                    : 431\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_true_positives             : 291.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [100]                    : (0.0004960000000000005, 0.07019108280254777)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [100]                    : (0.0004960000000000005, 0.009583609341825903)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [100]                    : (0.0004960000000000005, 0.009583609341825903)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/yolov5/37bebd60c0f04062991416a3b79f08a2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.1625\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/exp29\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (2.09 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     model graph                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Please wait for assets to finish uploading (timeout is 10800 seconds)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m All assets have been sent, waiting for delivery confirmation\n"
     ]
    }
   ],
   "source": [
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_Lwf.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_VOCKITTI.yaml \\\n",
    "--data data/temp_test.yaml \\\n",
    "--epochs 50 \\\n",
    "--weights ./runs/train/fog_02/weights/best.pt \\\n",
    "--Lwf_enable \\\n",
    "--Lwf_temperature 1.0 \\\n",
    "--Lwf_lambda 1e-3 \\\n",
    "\"\"\"\n",
    "!{command}\n",
    "# --weights ./runs/train/exp3/weights/best.pt \\\n",
    "# 1.0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "6e27c778-6d92-497b-b549-edbc20f3e2db",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti_increment.yaml, weights=['runs/train/exp29/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198       0.71        0.6      0.685      0.331\n",
      "                   car       2244       8711      0.743      0.784      0.819      0.502\n",
      "                   van       2244        861      0.814      0.586      0.706      0.412\n",
      "                 truck       2244        333      0.912       0.65      0.853      0.441\n",
      "                  tram       2244        138      0.887      0.754      0.874      0.386\n",
      "                person       2244       1286      0.467      0.646      0.588      0.262\n",
      "        person_sitting       2244         89      0.349       0.64      0.543       0.21\n",
      "               cyclist       2244        496      0.718      0.405      0.515      0.174\n",
      "                  misc       2244        284      0.789      0.335      0.581      0.257\n",
      "Speed: 0.1ms pre-process, 0.9ms inference, 1.9ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp144\u001b[0m\n",
      "Test set val successful|ly!\n"
     ]
    }
   ],
   "source": [
    "# 这是无雾训练集\n",
    "model = f'runs/train/exp29/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti_increment.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successful|ly!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "#1e-3 旧数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3db9446d-8703-4825-9e52-307d5e878ff9",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "431e4cfd-c39a-4345-b5f4-d02b8dd3242c",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "e8d74298-9c16-4170-b91f-048b3db3a6f2",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/VOCKITTI.yaml, weights=['runs/train/exp29/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 image\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.257      0.263      0.195     0.0873\n",
      "                   car       4952       1201      0.408      0.677      0.617      0.377\n",
      "                person       4952       4528       0.45      0.576      0.494      0.213\n",
      "             aeroplane       4952        285      0.222      0.309      0.193     0.0673\n",
      "               bicycle       4952        337      0.341      0.383      0.303      0.128\n",
      "                  bird       4952        459      0.149     0.0632     0.0507     0.0199\n",
      "                  boat       4952        263       0.17     0.0592      0.041     0.0144\n",
      "                bottle       4952        469          0          0     0.0255     0.0094\n",
      "                   bus       4952        213      0.278      0.408      0.256       0.14\n",
      "                   cat       4952        358      0.299      0.209      0.152      0.055\n",
      "                 chair       4952        756      0.254      0.144       0.11     0.0407\n",
      "                   cow       4952        244      0.281      0.316      0.198     0.0998\n",
      "           diningtable       4952        206      0.264     0.0631     0.0695     0.0198\n",
      "                   dog       4952        489      0.236      0.127      0.148     0.0569\n",
      "                 horse       4952        348      0.265      0.405      0.234     0.0778\n",
      "             motorbike       4952        325      0.313      0.462      0.357      0.146\n",
      "           pottedplant       4952        480       0.17      0.246      0.125       0.04\n",
      "                 sheep       4952        242      0.257      0.252      0.168     0.0896\n",
      "                  sofa       4952        239      0.322      0.012     0.0392     0.0185\n",
      "                 train       4952        282      0.222      0.376      0.205     0.0702\n",
      "             tvmonitor       4952        308      0.232      0.175      0.124     0.0616\n",
      "Speed: 0.1ms pre-process, 1.7ms inference, 1.8ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp145\u001b[0m\n",
      "Test set val successful|ly!\n"
     ]
    }
   ],
   "source": [
    "# 这是无雾训练集\n",
    "model = f'runs/train/exp29/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/VOCKITTI.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successful|ly!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "\n",
    "#1e-3 新数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "35949a1e-5e6d-45bb-b4c7-5114f7b43968",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/VOCKITTI.yaml, weights=['runs/train/exp23/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 image\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.728      0.651      0.693      0.439\n",
      "                   car       4952       1201      0.833      0.845      0.879      0.639\n",
      "                person       4952       4528      0.826       0.79      0.842      0.522\n",
      "             aeroplane       4952        285      0.793      0.723       0.77      0.449\n",
      "               bicycle       4952        337      0.811      0.769       0.83      0.527\n",
      "                  bird       4952        459      0.705       0.58      0.611      0.357\n",
      "                  boat       4952        263      0.596      0.506      0.499      0.259\n",
      "                bottle       4952        469      0.703       0.54      0.584      0.368\n",
      "                   bus       4952        213      0.754      0.719      0.782      0.602\n",
      "                   cat       4952        358      0.735      0.704      0.712      0.451\n",
      "                 chair       4952        756       0.69      0.454      0.522       0.31\n",
      "                   cow       4952        244      0.702      0.675      0.697      0.459\n",
      "           diningtable       4952        206      0.716      0.505      0.636      0.366\n",
      "                   dog       4952        489      0.741      0.586      0.683       0.42\n",
      "                 horse       4952        348      0.832      0.787      0.834      0.544\n",
      "             motorbike       4952        325      0.788      0.743       0.78      0.477\n",
      "           pottedplant       4952        480      0.591      0.406      0.428       0.21\n",
      "                 sheep       4952        242      0.557      0.731      0.678      0.451\n",
      "                  sofa       4952        239      0.667      0.582      0.643       0.42\n",
      "                 train       4952        282      0.774      0.762      0.779      0.497\n",
      "             tvmonitor       4952        308       0.75      0.614      0.676      0.452\n",
      "Speed: 0.1ms pre-process, 1.6ms inference, 1.3ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp146\u001b[0m\n",
      "Test set val successful|ly!\n"
     ]
    }
   ],
   "source": [
    "# 这是无雾训练集\n",
    "model = f'runs/train/exp23/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/VOCKITTI.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successful|ly!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ec3b7a32-130d-413e-af64-33bc3a79aa29",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "222ff5c3-2070-4d70-89f1-dd5550153183",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "755942dd-0336-40ca-9741-1f498a80f798",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fe08ea7a-5e39-4d7e-8874-500dc06570a5",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "14012634-7e26-4aea-831e-6ea792863ea8",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_Lwf: \u001b[0mweights=./runs/train/fog_02/weights/best.pt, cfg=models/yolov5s_VOCKITTI.yaml, data=data/temp_test.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=50, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=True, Lwf_lambda=0.0001, Lwf_temperature=1.0\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2882 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/01a419149c1449a4ae461cf4d4205cac\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83607  models.yolo.Detect                      [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7089751 parameters, 7089751 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/349 items from runs/train/fog_02/weights/best.pt\n",
      "Overriding model.yaml nc=26 with nc=8\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo.Detect                      [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/train2007.cache... 2501 ima\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/val.cache... 1048 images, 0 b\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.08 anchors/target, 1.000 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/exp33/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/exp33\u001b[0m\n",
      "Starting training for 50 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/49       3.5G    0.08354    0.04922    0.07208         32        640: 1\n",
      "tensor([1.69482], device='cuda:0', grad_fn=<AddBackward0>) tensor(6973.96240, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.041      0.239     0.0668     0.0306\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/49       3.5G    0.06232    0.04554    0.05895         14        640: 1\n",
      "tensor([1.06039], device='cuda:0', grad_fn=<AddBackward0>) tensor(5777.65967, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.833      0.127      0.309      0.133\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/49       3.5G    0.05709    0.04128    0.05499         33        640: 1\n",
      "tensor([1.44820], device='cuda:0', grad_fn=<AddBackward0>) tensor(6159.73193, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.767      0.266      0.383      0.164\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/49       3.5G     0.0537    0.03971    0.04909         29        640: 1\n",
      "tensor([1.46592], device='cuda:0', grad_fn=<AddBackward0>) tensor(8104.77295, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.637      0.354      0.453      0.184\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/49       3.5G    0.05026    0.03854    0.04451         21        640: 1\n",
      "tensor([1.48891], device='cuda:0', grad_fn=<AddBackward0>) tensor(7823.41357, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.727      0.321      0.461      0.199\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/49       3.5G    0.04831    0.03874    0.03952         21        640: 1\n",
      "tensor([1.47034], device='cuda:0', grad_fn=<AddBackward0>) tensor(8029.88281, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.663      0.289      0.452      0.212\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/49       3.5G    0.04704    0.03769    0.03607         37        640: 1\n",
      "tensor([1.46961], device='cuda:0', grad_fn=<AddBackward0>) tensor(7579.78662, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.74      0.232      0.444      0.204\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/49       3.5G    0.04521    0.03675    0.03368         24        640: 1\n",
      "tensor([1.51929], device='cuda:0', grad_fn=<AddBackward0>) tensor(8701.73340, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.695       0.24      0.422      0.194\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/49       3.5G    0.04431    0.03602    0.03075         13        640: 1\n",
      "tensor([1.48745], device='cuda:0', grad_fn=<AddBackward0>) tensor(8572.00977, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.723      0.211      0.397      0.184\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/49       3.5G    0.04325    0.03538    0.02966         22        640: 1\n",
      "tensor([1.18573], device='cuda:0', grad_fn=<AddBackward0>) tensor(8085.79590, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.727      0.225      0.442      0.205\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/49       3.5G    0.04214    0.03455    0.02782         32        640: 1\n",
      "tensor([1.41144], device='cuda:0', grad_fn=<AddBackward0>) tensor(8612.21973, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.785      0.206      0.445      0.212\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/49       3.5G    0.04131     0.0346    0.02693         24        640: 1\n",
      "tensor([1.43867], device='cuda:0', grad_fn=<AddBackward0>) tensor(8992.60352, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.745      0.209      0.413      0.189\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/49       3.5G    0.04146    0.03388    0.02573         33        640: 1\n",
      "tensor([1.26728], device='cuda:0', grad_fn=<AddBackward0>) tensor(7207.79395, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.698      0.221      0.446        0.2\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/49       3.5G    0.04017    0.03389    0.02351         15        640: 1\n",
      "tensor([1.14689], device='cuda:0', grad_fn=<AddBackward0>) tensor(8294.93652, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.693      0.202      0.406      0.189\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/49       3.5G    0.03931    0.03354    0.02312         15        640: 1\n",
      "tensor([1.30873], device='cuda:0', grad_fn=<AddBackward0>) tensor(8434.56348, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.685       0.21       0.43      0.203\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/49       3.5G    0.03883    0.03319    0.02225         27        640: 1\n",
      "tensor([1.20492], device='cuda:0', grad_fn=<AddBackward0>) tensor(7971.14014, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.745      0.188      0.442      0.205\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/49       3.5G    0.03851    0.03307    0.02155         16        640: 1\n",
      "tensor([1.17660], device='cuda:0', grad_fn=<AddBackward0>) tensor(7652.90625, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.653      0.196      0.413      0.196\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/49       3.5G    0.03817    0.03271    0.02045         31        640: 1\n",
      "tensor([1.26795], device='cuda:0', grad_fn=<AddBackward0>) tensor(7870.87793, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.729      0.204      0.432      0.202\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/49       3.5G    0.03765    0.03255    0.02004         24        640: 1\n",
      "tensor([1.19277], device='cuda:0', grad_fn=<AddBackward0>) tensor(7955.90527, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.851      0.201      0.431      0.208\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/49       3.5G    0.03686    0.03207    0.01925         28        640: 1\n",
      "tensor([1.62841], device='cuda:0', grad_fn=<AddBackward0>) tensor(8833.13965, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.831      0.196      0.408      0.192\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/49       3.5G    0.03657    0.03161     0.0189          9        640: 1\n",
      "tensor([1.38229], device='cuda:0', grad_fn=<AddBackward0>) tensor(7658.09375, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.828      0.198      0.412      0.192\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/49       3.5G    0.03643    0.03166    0.01766         37        640: 1\n",
      "tensor([1.26420], device='cuda:0', grad_fn=<AddBackward0>) tensor(7992.73877, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.805      0.198      0.379      0.182\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/49       3.5G    0.03553    0.03116    0.01818         26        640: 1\n",
      "tensor([1.21626], device='cuda:0', grad_fn=<AddBackward0>) tensor(7217.68604, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.747      0.194      0.413      0.196\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/49       3.5G    0.03505    0.03122    0.01754         25        640: 1\n",
      "tensor([1.14108], device='cuda:0', grad_fn=<AddBackward0>) tensor(7705.63672, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.87      0.198      0.458      0.226\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/49       3.5G    0.03501    0.03011    0.01694         30        640: 1\n",
      "tensor([1.25358], device='cuda:0', grad_fn=<AddBackward0>) tensor(7619.53613, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.834      0.206       0.44      0.217\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/49       3.5G    0.03414    0.02978    0.01621         19        640: 1\n",
      "tensor([1.18667], device='cuda:0', grad_fn=<AddBackward0>) tensor(8250.70605, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.753      0.198      0.449      0.212\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/49       3.5G    0.03435    0.02982    0.01621         14        640: 1\n",
      "tensor([1.22512], device='cuda:0', grad_fn=<AddBackward0>) tensor(8493.89355, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.793      0.208      0.451      0.209\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/49       3.5G    0.03409    0.03002    0.01529         38        640: 1\n",
      "tensor([1.32685], device='cuda:0', grad_fn=<AddBackward0>) tensor(8638.31934, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.79       0.21       0.44      0.213\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/49       3.5G    0.03312    0.02945    0.01524         20        640: 1\n",
      "tensor([0.94397], device='cuda:0', grad_fn=<AddBackward0>) tensor(6823.64600, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.784      0.211      0.464      0.217\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/49       3.5G    0.03317    0.02985    0.01482         30        640: 1\n",
      "tensor([1.46252], device='cuda:0', grad_fn=<AddBackward0>) tensor(9075.87500, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.815      0.205      0.443      0.216\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/49       3.5G    0.03311    0.02946     0.0148         21        640: 1\n",
      "tensor([1.15497], device='cuda:0', grad_fn=<AddBackward0>) tensor(7723.00098, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.886        0.2      0.462      0.224\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/49       3.5G    0.03214    0.02862    0.01444         29        640: 1\n",
      "tensor([1.16634], device='cuda:0', grad_fn=<AddBackward0>) tensor(8019.53857, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.83      0.201      0.472      0.219\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/49       3.5G    0.03236    0.02956    0.01408         28        640: 1\n",
      "tensor([1.14235], device='cuda:0', grad_fn=<AddBackward0>) tensor(8110.44238, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.797      0.208       0.45       0.22\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/49       3.5G      0.032    0.02883    0.01405         19        640: 1\n",
      "tensor([1.00360], device='cuda:0', grad_fn=<AddBackward0>) tensor(6523.36719, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.709      0.199      0.431      0.211\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/49       3.5G    0.03172    0.02856    0.01369         22        640: 1\n",
      "tensor([0.99569], device='cuda:0', grad_fn=<AddBackward0>) tensor(6801.35400, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.738      0.213      0.446      0.214\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/49       3.5G    0.03115    0.02873    0.01295         32        640: 1\n",
      "tensor([1.19221], device='cuda:0', grad_fn=<AddBackward0>) tensor(8016.79639, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.77      0.227       0.47      0.222\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/49       3.5G     0.0306     0.0287    0.01291         18        640: 1\n",
      "tensor([1.32313], device='cuda:0', grad_fn=<AddBackward0>) tensor(8914.77441, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.742      0.217      0.472      0.226\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/49       3.5G    0.02998    0.02831    0.01284         33        640: 1\n",
      "tensor([1.17373], device='cuda:0', grad_fn=<AddBackward0>) tensor(7554.95117, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.798      0.232      0.463      0.224\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/49       3.5G    0.02965    0.02824    0.01194         38        640: 1\n",
      "tensor([1.08526], device='cuda:0', grad_fn=<AddBackward0>) tensor(6626.18848, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.845      0.235      0.475      0.227\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/49       3.5G    0.02957     0.0279    0.01194         46        640: 1\n",
      "tensor([1.07570], device='cuda:0', grad_fn=<AddBackward0>) tensor(6733.33545, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.757      0.242      0.484      0.232\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/49       3.5G    0.02964    0.02709    0.01178         27        640: 1\n",
      "tensor([0.97088], device='cuda:0', grad_fn=<AddBackward0>) tensor(6590.60645, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.757      0.249       0.49      0.232\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/49       3.5G    0.02887    0.02776    0.01128         18        640: 1\n",
      "tensor([1.03186], device='cuda:0', grad_fn=<AddBackward0>) tensor(6844.53320, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.811      0.243      0.492      0.243\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/49       3.5G    0.02899    0.02709    0.01235         24        640: 1\n",
      "tensor([1.13620], device='cuda:0', grad_fn=<AddBackward0>) tensor(7296.27295, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.823      0.237      0.499      0.235\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/49       3.5G    0.02869    0.02797    0.01158         21        640: 1\n",
      "tensor([0.92608], device='cuda:0', grad_fn=<AddBackward0>) tensor(5899.95215, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.753      0.246        0.5      0.239\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/49       3.5G    0.02827     0.0273    0.01116         28        640: 1\n",
      "tensor([1.02659], device='cuda:0', grad_fn=<AddBackward0>) tensor(7162.02881, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.832      0.237      0.482      0.226\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/49       3.5G    0.02827    0.02709    0.01119         34        640: 1\n",
      "tensor([1.16258], device='cuda:0', grad_fn=<AddBackward0>) tensor(7452.48633, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.743      0.242      0.487      0.236\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/49       3.5G    0.02798    0.02666    0.01136         29        640: 1\n",
      "tensor([0.99169], device='cuda:0', grad_fn=<AddBackward0>) tensor(6177.04541, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.731      0.246      0.496      0.241\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/49       3.5G    0.02741    0.02654    0.01108         23        640: 1\n",
      "tensor([1.09807], device='cuda:0', grad_fn=<AddBackward0>) tensor(8295.73828, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.743      0.254      0.507      0.241\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/49       3.5G     0.0269    0.02622    0.01098         28        640: 1\n",
      "tensor([0.96078], device='cuda:0', grad_fn=<AddBackward0>) tensor(5925.26172, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.776      0.254      0.505      0.244\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/49       3.5G    0.02696    0.02604    0.01082         19        640: 1\n",
      "tensor([0.87430], device='cuda:0', grad_fn=<AddBackward0>) tensor(5665.55420, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.841      0.256       0.52      0.249\n",
      "\n",
      "50 epochs completed in 0.481 hours.\n",
      "Optimizer stripped from runs/train/exp33/weights/last.pt, 14.4MB\n",
      "Optimizer stripped from runs/train/exp33/weights/best.pt, 14.4MB\n",
      "\n",
      "Validating runs/train/exp33/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.841      0.256       0.52      0.249\n",
      "                   car       1048       4012      0.756      0.802      0.825      0.507\n",
      "                   van       1048        431      0.888      0.277      0.605       0.32\n",
      "                 truck       1048        166          1      0.117      0.676      0.348\n",
      "                  tram       1048         56          1     0.0936      0.743      0.327\n",
      "                person       1048        618      0.521      0.557      0.506      0.223\n",
      "        person_sitting       1048         20      0.566       0.15      0.236     0.0667\n",
      "               cyclist       1048        234          1     0.0517      0.328     0.0902\n",
      "                  misc       1048        138          1          0      0.239      0.111\n",
      "Results saved to \u001b[1mruns/train/exp33\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/01a419149c1449a4ae461cf4d4205cac\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_f1                         : 0.7784637096232305\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_false_positives            : 1037.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5                     : 0.8247434452229866\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5:.95                 : 0.506973636403499\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_precision                  : 0.7562660619850314\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_recall                     : 0.8020038358398278\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_support                    : 4012\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_true_positives             : 3218.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_f1                     : 0.0983554103339344\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_false_positives        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_mAP@.5                 : 0.3276610595339954\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_mAP@.5:.95             : 0.09019946586111713\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_precision              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_recall                 : 0.051721236906422094\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_support                : 234\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_true_positives         : 12.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [785]                     : (1.136199951171875, 7.327148914337158)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [100]          : (0.0667916120396388, 0.5199270303132438)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [100]     : (0.030625735227712135, 0.24886982844732056)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [100]        : (0.04095668939863814, 0.8860986700594725)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [100]           : (0.12708544365123722, 0.353855245954822)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_f1                        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_false_positives           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_mAP@.5                    : 0.23925473846944356\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_mAP@.5:.95                : 0.11055056669225019\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_precision                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_recall                    : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_support                   : 138\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_true_positives            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_f1                      : 0.5382873675994864\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_false_positives         : 316.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5                  : 0.5062898115070559\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5:.95              : 0.22250082050924042\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_precision               : 0.521111286870238\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_recall                  : 0.5566343042071198\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_f1              : 0.23713519927114007\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_false_positives : 2.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_mAP@.5          : 0.23621212121212123\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_mAP@.5:.95      : 0.06674188644295027\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_precision       : 0.565821882488549\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_recall          : 0.15\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_support         : 20\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_true_positives  : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_support                 : 618\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_true_positives          : 344.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [100]           : (0.02690155804157257, 0.08353935927152634)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [100]           : (0.010820274241268635, 0.07207736372947693)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [100]           : (0.02603806182742119, 0.04922189190983772)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_f1                        : 0.17115754940327452\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_false_positives           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_mAP@.5                    : 0.7426402130247296\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_mAP@.5:.95                : 0.32689517090593967\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_precision                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_recall                    : 0.09358791368137163\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_support                   : 56\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_true_positives            : 5.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_f1                       : 0.20909066151550468\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_false_positives          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5                   : 0.6759177184333103\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5:.95               : 0.34803374765730927\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_precision                : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_recall                   : 0.11675111465577678\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_support                  : 166\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_true_positives           : 19.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [100]             : (0.04224473610520363, 0.08763778209686279)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [100]             : (0.020529577508568764, 0.0495586059987545)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [100]             : (0.06457322835922241, 0.11769543588161469)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_f1                         : 0.42248038448245767\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_false_positives            : 15.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5                     : 0.6051692066269234\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5:.95                 : 0.31998516367761554\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_precision                  : 0.8884284367286026\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_recall                     : 0.2771338624006838\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_support                    : 431\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_true_positives             : 119.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [100]                    : (0.0004960000000000005, 0.07019108280254777)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [100]                    : (0.0004960000000000005, 0.009583609341825903)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [100]                    : (0.0004960000000000005, 0.009583609341825903)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/yolov5/01a419149c1449a4ae461cf4d4205cac\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.1625\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/exp33\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (1.91 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     model graph                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n"
     ]
    }
   ],
   "source": [
    "# Fine-tune with LwF (Learning without Forgetting) on the new dataset,\n",
    "# starting from the fog_02 checkpoint. The env var enables per-class\n",
    "# metric logging to Comet. (Plain string: no f-string placeholders needed.)\n",
    "command = \"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_Lwf.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_VOCKITTI.yaml \\\n",
    "--data data/temp_test.yaml \\\n",
    "--epochs 50 \\\n",
    "--weights ./runs/train/fog_02/weights/best.pt \\\n",
    "--Lwf_enable \\\n",
    "--Lwf_temperature 1.0 \\\n",
    "--Lwf_lambda 1e-4 \\\n",
    "\"\"\"\n",
    "!{command}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1e659c6e-218e-4ad6-8d26-b890cff54774",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "1a7af6f8-4cb3-417b-befe-9c2ff58267ea",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/VOCKITTI.yaml, weights=['runs/train/exp33/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 image\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.623      0.548      0.564      0.327\n",
      "                   car       4952       1201      0.818      0.791      0.833       0.58\n",
      "                person       4952       4528      0.775      0.741      0.778      0.443\n",
      "             aeroplane       4952        285       0.66      0.604      0.648      0.347\n",
      "               bicycle       4952        337       0.71      0.603      0.661      0.384\n",
      "                  bird       4952        459      0.569      0.422      0.436      0.228\n",
      "                  boat       4952        263      0.412      0.338      0.322       0.16\n",
      "                bottle       4952        469      0.591      0.484      0.475       0.27\n",
      "                   bus       4952        213      0.633        0.6      0.647      0.468\n",
      "                   cat       4952        358      0.633      0.542      0.554      0.293\n",
      "                 chair       4952        756      0.591      0.335      0.388      0.207\n",
      "                   cow       4952        244      0.582       0.61      0.583      0.365\n",
      "           diningtable       4952        206      0.565      0.442      0.467      0.242\n",
      "                   dog       4952        489      0.555       0.48       0.49      0.265\n",
      "                 horse       4952        348      0.741      0.716      0.758      0.445\n",
      "             motorbike       4952        325      0.687      0.671      0.686      0.388\n",
      "           pottedplant       4952        480      0.524      0.334      0.335      0.151\n",
      "                 sheep       4952        242      0.517      0.591      0.528      0.342\n",
      "                  sofa       4952        239      0.561      0.439      0.441      0.256\n",
      "                 train       4952        282      0.684      0.674      0.685      0.374\n",
      "             tvmonitor       4952        308       0.65      0.542       0.56      0.328\n",
      "Speed: 0.1ms pre-process, 1.6ms inference, 1.1ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp149\u001b[0m\n",
      "Test set val successful|ly!\n"
     ]
    }
   ],
   "source": [
    "# 这是无雾训练集\n",
    "model = f'runs/train/exp33/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/VOCKITTI.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successful|ly!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "\n",
    "#1e-4 新数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9aee4303-8538-4325-a975-8d79773594de",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "90d861da-a910-4194-a223-528b7cd7febf",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti_increment.yaml, weights=['runs/train/exp33/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.784      0.248      0.519       0.25\n",
      "                   car       2244       8711      0.762      0.809      0.834      0.512\n",
      "                   van       2244        861      0.873      0.288      0.561      0.306\n",
      "                 truck       2244        333      0.961       0.15      0.658      0.361\n",
      "                  tram       2244        138          1     0.0358      0.699      0.302\n",
      "                person       2244       1286      0.499      0.582      0.515      0.229\n",
      "        person_sitting       2244         89      0.665     0.0447      0.291     0.0844\n",
      "               cyclist       2244        496      0.936     0.0589       0.32     0.0866\n",
      "                  misc       2244        284      0.572     0.0141      0.274      0.121\n",
      "Speed: 0.1ms pre-process, 1.0ms inference, 0.8ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp150\u001b[0m\n",
      "Test set val successful|ly!\n"
     ]
    }
   ],
   "source": [
    "# 这是无雾训练集\n",
    "model = f'runs/train/exp33/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti_increment.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successful|ly!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "#1e-4 旧数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "53ff350f-7e76-4fef-9fd6-9de830ad7416",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f93a9d0d-623d-4dca-bfaf-9eee5a0d774b",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f7888258-c0cd-471b-8ca3-4ec13e4a2202",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3062af1d-cf36-4b7a-b26d-a02d8157de4a",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d32c41e7-7d44-4593-ba33-e1e220d33aaa",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4bbd6035-397f-49b8-92e7-98ec221f3c22",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Lwf_lambda = 1e-4 can serve as the baseline. Next, experiment with the temperature."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "4445baf4-1876-4dbe-af2a-6bd84ebbb612",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_Lwf: \u001b[0mweights=./runs/train/fog_02/weights/best.pt, cfg=models/yolov5s_VOCKITTI.yaml, data=data/temp_test.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=50, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0, Lwf_enable=True, Lwf_lambda=0.0001, Lwf_temperature=10.0\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2882 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/8ee50dc05f594120b253c82094df23d8\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83607  models.yolo.Detect                      [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7089751 parameters, 7089751 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/349 items from runs/train/fog_02/weights/best.pt\n",
      "Overriding model.yaml nc=26 with nc=8\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo.Detect                      [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/train2007.cache... 2501 ima\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/val.cache... 1048 images, 0 b\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.08 anchors/target, 1.000 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/exp36/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/exp36\u001b[0m\n",
      "Starting training for 50 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/49       3.5G     0.0845     0.0491    0.07213         32        640: 1\n",
      "tensor([2.27805], device='cuda:0', grad_fn=<AddBackward0>) tensor(12682.11328, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675     0.0236      0.245     0.0575     0.0245\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/49       3.5G    0.06473    0.04712     0.0612         14        640: 1\n",
      "tensor([1.41934], device='cuda:0', grad_fn=<AddBackward0>) tensor(9028.45020, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.845      0.104      0.239     0.0983\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/49       3.5G    0.05916     0.0427    0.05755         33        640: 1\n",
      "tensor([2.01833], device='cuda:0', grad_fn=<AddBackward0>) tensor(11413.08984, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.723      0.262      0.383      0.165\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/49       3.5G     0.0556    0.04104    0.05202         29        640: 1\n",
      "tensor([2.15898], device='cuda:0', grad_fn=<AddBackward0>) tensor(14576.85254, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.549      0.371       0.39      0.164\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/49       3.5G    0.05268    0.03996    0.04875         21        640: 1\n",
      "tensor([2.10045], device='cuda:0', grad_fn=<AddBackward0>) tensor(13503.97461, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.582       0.38      0.445      0.199\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/49       3.5G    0.05125    0.04009     0.0449         21        640: 1\n",
      "tensor([1.82527], device='cuda:0', grad_fn=<AddBackward0>) tensor(11122.08301, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.432      0.418      0.401      0.178\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/49       3.5G    0.04966    0.03939    0.04151         37        640: 1\n",
      "tensor([1.78099], device='cuda:0', grad_fn=<AddBackward0>) tensor(10506.55078, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.636      0.411      0.511      0.243\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/49       3.5G    0.04813    0.03852    0.03914         24        640: 1\n",
      "tensor([2.04931], device='cuda:0', grad_fn=<AddBackward0>) tensor(13185.88867, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.614      0.401      0.502      0.224\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/49       3.5G    0.04675    0.03801    0.03623         13        640: 1\n",
      "tensor([1.87344], device='cuda:0', grad_fn=<AddBackward0>) tensor(12022.46680, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.627      0.391      0.479      0.227\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/49       3.5G     0.0458    0.03738    0.03476         22        640: 1\n",
      "tensor([1.59775], device='cuda:0', grad_fn=<AddBackward0>) tensor(11521.55957, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.589      0.441      0.502      0.228\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/49       3.5G    0.04505    0.03673    0.03321         32        640: 1\n",
      "tensor([1.87437], device='cuda:0', grad_fn=<AddBackward0>) tensor(12694.50391, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.677      0.378       0.51      0.235\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/49       3.5G    0.04434    0.03673     0.0321         24        640: 1\n",
      "tensor([1.84706], device='cuda:0', grad_fn=<AddBackward0>) tensor(12148.74707, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.658      0.414      0.506      0.237\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/49       3.5G    0.04411    0.03607    0.03108         33        640: 1\n",
      "tensor([1.53756], device='cuda:0', grad_fn=<AddBackward0>) tensor(9523.40918, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.66      0.448      0.544      0.234\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/49       3.5G    0.04286    0.03633    0.02895         15        640: 1\n",
      "tensor([1.47744], device='cuda:0', grad_fn=<AddBackward0>) tensor(11163.98828, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.648      0.414      0.506      0.221\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/49       3.5G    0.04218    0.03587    0.02857         15        640: 1\n",
      "tensor([1.61292], device='cuda:0', grad_fn=<AddBackward0>) tensor(10947.97852, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.691      0.413      0.532      0.236\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/49       3.5G    0.04174    0.03574    0.02756         27        640: 1\n",
      "tensor([1.53418], device='cuda:0', grad_fn=<AddBackward0>) tensor(10990.83691, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.633      0.454      0.539      0.251\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/49       3.5G    0.04143    0.03568    0.02708         16        640: 1\n",
      "tensor([1.51686], device='cuda:0', grad_fn=<AddBackward0>) tensor(9951.58398, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.686      0.413      0.517      0.238\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/49       3.5G    0.04099    0.03541    0.02579         31        640: 1\n",
      "tensor([1.55977], device='cuda:0', grad_fn=<AddBackward0>) tensor(10495.61133, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.671      0.437      0.522       0.25\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/49       3.5G    0.04034     0.0352    0.02528         24        640: 1\n",
      "tensor([1.44433], device='cuda:0', grad_fn=<AddBackward0>) tensor(9875.45215, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.702      0.448      0.529      0.245\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/49       3.5G    0.03993    0.03468    0.02447         28        640: 1\n",
      "tensor([2.07597], device='cuda:0', grad_fn=<AddBackward0>) tensor(12543.83887, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.734      0.388      0.502       0.24\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/49       3.5G    0.03979    0.03425    0.02411          9        640: 1\n",
      "tensor([1.60501], device='cuda:0', grad_fn=<AddBackward0>) tensor(9522.76270, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.573      0.419      0.518      0.239\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/49       3.5G    0.03926    0.03452    0.02277         37        640: 1\n",
      "tensor([1.66990], device='cuda:0', grad_fn=<AddBackward0>) tensor(11408.61816, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.63      0.392      0.505      0.242\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/49       3.5G    0.03861    0.03392    0.02365         26        640: 1\n",
      "tensor([1.45503], device='cuda:0', grad_fn=<AddBackward0>) tensor(9026.48535, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.685       0.45      0.541      0.253\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/49       3.5G    0.03812     0.0342    0.02318         25        640: 1\n",
      "tensor([1.47584], device='cuda:0', grad_fn=<AddBackward0>) tensor(10381.35645, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.717      0.425      0.531      0.228\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/49       3.5G    0.03794    0.03294    0.02234         30        640: 1\n",
      "tensor([1.44015], device='cuda:0', grad_fn=<AddBackward0>) tensor(9347.53320, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.676      0.455      0.561      0.274\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/49       3.5G    0.03749     0.0326     0.0216         19        640: 1\n",
      "tensor([1.51181], device='cuda:0', grad_fn=<AddBackward0>) tensor(11053.26074, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.726      0.445      0.564      0.275\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/49       3.5G    0.03737    0.03274    0.02132         14        640: 1\n",
      "tensor([1.42634], device='cuda:0', grad_fn=<AddBackward0>) tensor(10380.45508, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.683      0.451      0.578       0.27\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/49       3.5G    0.03696    0.03308    0.02031         38        640: 1\n",
      "tensor([1.66260], device='cuda:0', grad_fn=<AddBackward0>) tensor(11201.20117, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.615      0.435      0.553      0.267\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/49       3.5G    0.03617    0.03248    0.02069         20        640: 1\n",
      "tensor([1.10467], device='cuda:0', grad_fn=<AddBackward0>) tensor(8028.73486, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.73      0.484       0.58      0.264\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/49       3.5G    0.03652    0.03315    0.01997         30        640: 1\n",
      "tensor([1.84408], device='cuda:0', grad_fn=<AddBackward0>) tensor(11843.58789, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.669      0.462      0.566       0.27\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/49       3.5G    0.03623    0.03251    0.01999         21        640: 1\n",
      "tensor([1.46744], device='cuda:0', grad_fn=<AddBackward0>) tensor(10313.63086, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.657       0.46      0.569      0.272\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/49       3.5G    0.03531    0.03176    0.01973         29        640: 1\n",
      "tensor([1.50176], device='cuda:0', grad_fn=<AddBackward0>) tensor(10736.15332, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.725      0.484      0.564      0.262\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/49       3.5G    0.03548    0.03283    0.01943         28        640: 1\n",
      "tensor([1.39070], device='cuda:0', grad_fn=<AddBackward0>) tensor(10339.44922, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.719      0.474      0.577       0.28\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/49       3.5G    0.03493    0.03209    0.01947         19        640: 1\n",
      "tensor([1.09070], device='cuda:0', grad_fn=<AddBackward0>) tensor(7118.56885, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.681       0.46      0.549      0.262\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/49       3.5G    0.03491    0.03177    0.01842         22        640: 1\n",
      "tensor([1.14463], device='cuda:0', grad_fn=<AddBackward0>) tensor(8307.71777, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.718      0.491      0.596      0.269\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/49       3.5G    0.03428    0.03218    0.01791         32        640: 1\n",
      "tensor([1.41333], device='cuda:0', grad_fn=<AddBackward0>) tensor(9742.22168, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.711      0.481      0.603      0.294\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/49       3.5G    0.03396    0.03203    0.01813         18        640: 1\n",
      "tensor([1.51469], device='cuda:0', grad_fn=<AddBackward0>) tensor(10653.98242, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.694      0.499      0.576      0.278\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/49       3.5G     0.0332    0.03169    0.01792         33        640: 1\n",
      "tensor([1.40223], device='cuda:0', grad_fn=<AddBackward0>) tensor(8935.38574, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.738       0.47      0.582      0.285\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/49       3.5G    0.03314    0.03177    0.01689         38        640: 1\n",
      "tensor([1.20159], device='cuda:0', grad_fn=<AddBackward0>) tensor(7219.38818, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.723      0.471      0.589      0.289\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/49       3.5G    0.03292    0.03146    0.01696         46        640: 1\n",
      "tensor([1.20737], device='cuda:0', grad_fn=<AddBackward0>) tensor(7462.57520, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.71      0.497      0.599      0.292\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/49       3.5G    0.03306    0.03056    0.01687         27        640: 1\n",
      "tensor([1.09589], device='cuda:0', grad_fn=<AddBackward0>) tensor(7422.69580, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.734      0.508       0.61      0.298\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/49       3.5G    0.03232    0.03151    0.01653         18        640: 1\n",
      "tensor([1.15771], device='cuda:0', grad_fn=<AddBackward0>) tensor(7661.24268, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.759      0.489      0.615      0.311\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/49       3.5G    0.03239    0.03055    0.01758         24        640: 1\n",
      "tensor([1.27374], device='cuda:0', grad_fn=<AddBackward0>) tensor(8204.72754, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.746      0.485      0.616      0.303\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/49       3.5G    0.03222    0.03174    0.01683         21        640: 1\n",
      "tensor([1.01179], device='cuda:0', grad_fn=<AddBackward0>) tensor(6135.22949, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.725      0.517      0.611      0.306\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/49       3.5G    0.03181    0.03108    0.01614         28        640: 1\n",
      "tensor([1.19550], device='cuda:0', grad_fn=<AddBackward0>) tensor(8049.48438, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.714      0.486      0.601      0.292\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/49       3.5G    0.03188    0.03095    0.01647         34        640: 1\n",
      "tensor([1.32157], device='cuda:0', grad_fn=<AddBackward0>) tensor(8574.46191, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.733      0.491       0.61      0.301\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/49       3.5G    0.03164    0.03044    0.01658         29        640: 1\n",
      "tensor([1.06371], device='cuda:0', grad_fn=<AddBackward0>) tensor(5954.72217, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.737      0.511      0.627      0.303\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/49       3.5G      0.031    0.03036    0.01628         23        640: 1\n",
      "tensor([1.30716], device='cuda:0', grad_fn=<AddBackward0>) tensor(9619.44922, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.747       0.51      0.632      0.317\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/49       3.5G    0.03065    0.03022    0.01575         28        640: 1\n",
      "tensor([0.98629], device='cuda:0', grad_fn=<AddBackward0>) tensor(5484.12109, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.738      0.519      0.628      0.315\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/49       3.5G     0.0305    0.02985     0.0159         19        640: 1\n",
      "tensor([0.93872], device='cuda:0', grad_fn=<AddBackward0>) tensor(5758.22705, device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.744      0.513      0.625      0.312\n",
      "\n",
      "50 epochs completed in 0.488 hours.\n",
      "Optimizer stripped from runs/train/exp36/weights/last.pt, 14.4MB\n",
      "Optimizer stripped from runs/train/exp36/weights/best.pt, 14.4MB\n",
      "\n",
      "Validating runs/train/exp36/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.747       0.51      0.633      0.317\n",
      "                   car       1048       4012      0.728      0.843      0.853      0.529\n",
      "                   van       1048        431      0.902       0.55      0.784      0.432\n",
      "                 truck       1048        166          1      0.658      0.799      0.467\n",
      "                  tram       1048         56      0.807      0.748      0.847      0.386\n",
      "                person       1048        618      0.469      0.615      0.552      0.259\n",
      "        person_sitting       1048         20      0.531       0.25      0.303      0.103\n",
      "               cyclist       1048        234      0.803      0.209      0.492      0.165\n",
      "                  misc       1048        138      0.736       0.21      0.432      0.198\n",
      "Results saved to \u001b[1mruns/train/exp36\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/8ee50dc05f594120b253c82094df23d8\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_f1                         : 0.7814920003031933\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_false_positives            : 1261.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5                     : 0.8528321988590635\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_mAP@.5:.95                 : 0.5289401714317508\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_precision                  : 0.7283708713996415\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_recall                     : 0.8429710867397806\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_support                    : 4012\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     car_true_positives             : 3382.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_f1                     : 0.33219893968665154\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_false_positives        : 12.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_mAP@.5                 : 0.4922562949813555\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_mAP@.5:.95             : 0.16462532125265042\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_precision              : 0.8032761098334869\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_recall                 : 0.2093983482872372\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_support                : 234\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cyclist_true_positives         : 49.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [785]                     : (1.273740291595459, 10.075133323669434)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [100]          : (0.057517780176964496, 0.632483004948948)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [100]     : (0.024513068695888957, 0.3169512918646566)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [100]        : (0.023616718070177383, 0.8450490377588316)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [100]           : (0.10409935387569225, 0.5189071744942919)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_f1                        : 0.32695105678776254\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_false_positives           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_mAP@.5                    : 0.43193983218613435\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_mAP@.5:.95                : 0.19776669075019904\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_precision                 : 0.7361044645870517\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_recall                    : 0.21014492753623187\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_support                   : 138\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     misc_true_positives            : 29.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_f1                      : 0.5322211027683156\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_false_positives         : 430.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5                  : 0.5518007443340742\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_mAP@.5:.95              : 0.2591576255185414\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_precision               : 0.46914857294994333\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_recall                  : 0.6148867313915858\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_f1              : 0.3399912475664879\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_false_positives : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_mAP@.5          : 0.30259085553693066\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_mAP@.5:.95      : 0.1034623755403858\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_precision       : 0.5312072658459159\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_recall          : 0.25\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_support         : 20\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_sitting_true_positives  : 5.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_support                 : 618\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     person_true_positives          : 380.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [100]           : (0.03049735724925995, 0.08449896425008774)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [100]           : (0.015751177445054054, 0.07212729752063751)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [100]           : (0.029853546991944313, 0.04910058155655861)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_f1                        : 0.7768139710716989\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_false_positives           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_mAP@.5                    : 0.846833218156319\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_mAP@.5:.95                : 0.38618095934288876\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_precision                 : 0.8073722926073506\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_recall                    : 0.748484499990524\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_support                   : 56\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     tram_true_positives            : 42.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_f1                       : 0.7933926515128452\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_false_positives          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5                   : 0.7990825104127218\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_mAP@.5:.95               : 0.4665077696233172\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_precision                : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_recall                   : 0.6575400460701665\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_support                  : 166\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     truck_true_positives           : 109.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [100]             : (0.040488772094249725, 0.09104443341493607)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [100]             : (0.017246531322598457, 0.04455951973795891)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [100]             : (0.06284309178590775, 0.11565583944320679)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_f1                         : 0.6832703223232003\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_false_positives            : 26.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5                     : 0.7841697706439746\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_mAP@.5:.95                 : 0.43192913038982245\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_precision                  : 0.9020924928538199\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_recall                     : 0.5498839907192575\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_support                    : 431\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     van_true_positives             : 237.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [100]                    : (0.0004960000000000005, 0.07019108280254777)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [100]                    : (0.0004960000000000005, 0.009583609341825903)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [100]                    : (0.0004960000000000005, 0.009583609341825903)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/yolov5/8ee50dc05f594120b253c82094df23d8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.1625\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/exp36\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (2.04 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     model graph                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Please wait for metadata to finish uploading (timeout is 3600 seconds)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Uploading 211 metrics, params and output messages\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Please wait for assets to finish uploading (timeout is 10800 seconds)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Still uploading 9 file(s), remaining 84.67 KB/1.84 MB\n"
     ]
    }
   ],
   "source": [
    "# Launch an LwF (Learning without Forgetting) training run via train_Lwf.py,\n",
    "# starting from the fog_02 checkpoint, with distillation temperature 10.0\n",
    "# and lambda 1e-4 (flags below). COMET_LOG_PER_CLASS_METRICS enables\n",
    "# per-class metric logging in Comet.\n",
    "# NOTE(review): data/temp_test.yaml is presumably the new (incremental)\n",
    "# dataset — confirm against the data config.\n",
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_Lwf.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_VOCKITTI.yaml \\\n",
    "--data data/temp_test.yaml \\\n",
    "--epochs 50 \\\n",
    "--weights ./runs/train/fog_02/weights/best.pt \\\n",
    "--Lwf_enable \\\n",
    "--Lwf_temperature 10.0 \\\n",
    "--Lwf_lambda 1e-4 \\\n",
    "\"\"\"\n",
    "!{command}\n",
    "# --weights ./runs/train/exp3/weights/best.pt \\\n",
    "# 1.0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "36598324-72de-43f0-8411-2275bf3a789d",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "b1a0f157-b161-4e3b-bc64-bd92eb8e6e06",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/VOCKITTI.yaml, weights=['runs/train/exp36/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 image\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032       0.56      0.483      0.482      0.261\n",
      "                   car       4952       1201      0.775       0.78      0.804      0.539\n",
      "                person       4952       4528       0.73      0.691      0.732      0.388\n",
      "             aeroplane       4952        285      0.634      0.547      0.571      0.274\n",
      "               bicycle       4952        337      0.689       0.57      0.609      0.341\n",
      "                  bird       4952        459      0.463      0.333      0.331      0.153\n",
      "                  boat       4952        263      0.344      0.319      0.249       0.11\n",
      "                bottle       4952        469      0.505      0.411      0.367      0.187\n",
      "                   bus       4952        213      0.579      0.554      0.569        0.4\n",
      "                   cat       4952        358      0.602      0.449      0.432      0.212\n",
      "                 chair       4952        756      0.562        0.3      0.338      0.169\n",
      "                   cow       4952        244      0.493      0.561      0.507      0.295\n",
      "           diningtable       4952        206      0.592       0.32      0.384      0.155\n",
      "                   dog       4952        489      0.439      0.317      0.335      0.163\n",
      "                 horse       4952        348      0.646      0.664      0.669      0.352\n",
      "             motorbike       4952        325      0.598      0.625      0.602      0.318\n",
      "           pottedplant       4952        480      0.434      0.267      0.276       0.12\n",
      "                 sheep       4952        242      0.515       0.55      0.499      0.306\n",
      "                  sofa       4952        239      0.474      0.314      0.328      0.174\n",
      "                 train       4952        282       0.58      0.599       0.57       0.29\n",
      "             tvmonitor       4952        308      0.542      0.487      0.475      0.265\n",
      "Speed: 0.1ms pre-process, 1.4ms inference, 1.0ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp155\u001b[0m\n",
      "Test set val successful|ly!\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the fog-free training run (exp36) on the VOC+KITTI test split.\n",
    "model = 'runs/train/exp36/weights/last.pt'\n",
    "\n",
    "# '&&' ensures the echo only fires if val.py exits successfully.\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/VOCKITTI.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "# temperature 10.0, new dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f2510907-6ea6-41f4-8c0e-98f4700354e2",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "c3cab48c-3092-4a09-b1af-5fadb9112fa3",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti_increment.yaml, weights=['runs/train/exp36/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.776      0.497      0.632      0.317\n",
      "                   car       2244       8711      0.729      0.849      0.858      0.534\n",
      "                   van       2244        861      0.844       0.51      0.711      0.397\n",
      "                 truck       2244        333      0.987      0.678      0.847      0.521\n",
      "                  tram       2244        138      0.796       0.62      0.734      0.338\n",
      "                person       2244       1286      0.434      0.617      0.558      0.247\n",
      "        person_sitting       2244         89      0.682      0.281      0.338      0.113\n",
      "               cyclist       2244        496      0.812       0.22      0.496      0.168\n",
      "                  misc       2244        284      0.921      0.206      0.517      0.215\n",
      "Speed: 0.1ms pre-process, 0.9ms inference, 0.8ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp156\u001b[0m\n",
      "Test set val successful|ly!\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the fog-free training run (exp36) on the KITTI-increment test split.\n",
    "model = 'runs/train/exp36/weights/last.pt'\n",
    "\n",
    "# '&&' ensures the echo only fires if val.py exits successfully.\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti_increment.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "# temperature 10.0, old dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0e9675e7-af2d-42a0-8659-5c746cc5065b",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1da5ff89-bdbb-47f4-88ad-045e115df685",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0aa45a05-5d67-48c1-90c5-d49a20dccac2",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "79bcb105-5c98-4aae-9209-8981c7dbf2c8",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0159debf-cfe5-44e9-ab42-da5f8265bed7",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e3c80ced-f55e-459e-b638-fa8882ab8fa5",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3f8921fe-0784-4366-a579-c0386d509e42",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "10285ded-5162-45e8-8b5a-7b1414bf9100",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti_increment.yaml, weights=['runs/train/increment_VOC_Lwf/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 3868f729 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.671      0.194      0.407      0.197\n",
      "                   car       2244       8711      0.545      0.824      0.778       0.43\n",
      "                   van       2244        861      0.919     0.0916      0.433      0.229\n",
      "                 truck       2244        333          1     0.0151      0.544      0.309\n",
      "                  tram       2244        138          1          0      0.481      0.211\n",
      "                person       2244       1286      0.309      0.606      0.494      0.237\n",
      "        person_sitting       2244         89      0.241     0.0108      0.129     0.0321\n",
      "               cyclist       2244        496      0.355    0.00143       0.24     0.0606\n",
      "                  misc       2244        284          1          0      0.159     0.0675\n",
      "Speed: 0.1ms pre-process, 1.1ms inference, 2.8ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp157\u001b[0m\n",
      "Test set val successful|ly!\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the LwF incremental model on the KITTI-increment test split\n",
    "# (measures forgetting on the old task).\n",
    "model = 'runs/train/increment_VOC_Lwf/weights/last.pt'\n",
    "\n",
    "# '&&' ensures the echo only fires if val.py exits successfully.\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti_increment.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "b843573d-b54a-436f-8392-7192c614a908",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/VOCKITTI.yaml, weights=['runs/train/increment_VOC_Lwf/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 3868f729 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 image\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.723      0.668      0.724      0.457\n",
      "                   car       4952       1201      0.775      0.851       0.88      0.636\n",
      "                person       4952       4528      0.805      0.778      0.842      0.521\n",
      "             aeroplane       4952        285       0.88      0.719      0.813      0.486\n",
      "               bicycle       4952        337      0.851      0.712      0.823      0.527\n",
      "                  bird       4952        459      0.689      0.558      0.629      0.362\n",
      "                  boat       4952        263      0.563      0.593      0.574      0.299\n",
      "                bottle       4952        469      0.626      0.678      0.685      0.429\n",
      "                   bus       4952        213      0.793      0.718      0.797      0.619\n",
      "                   cat       4952        358       0.79      0.662      0.754      0.467\n",
      "                 chair       4952        756      0.577       0.56      0.578      0.353\n",
      "                   cow       4952        244       0.65      0.725      0.735      0.501\n",
      "           diningtable       4952        206      0.746      0.613      0.684      0.406\n",
      "                   dog       4952        489      0.757      0.556      0.714      0.431\n",
      "                 horse       4952        348      0.858      0.714      0.822      0.523\n",
      "             motorbike       4952        325      0.792      0.704      0.804      0.488\n",
      "           pottedplant       4952        480      0.548      0.498      0.466      0.232\n",
      "                 sheep       4952        242      0.653      0.756      0.748      0.511\n",
      "                  sofa       4952        239      0.656      0.577      0.625       0.41\n",
      "                 train       4952        282      0.814      0.716      0.809      0.493\n",
      "             tvmonitor       4952        308      0.642      0.665      0.691      0.453\n",
      "Speed: 0.1ms pre-process, 2.0ms inference, 2.2ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp158\u001b[0m\n",
      "Test set val successful|ly!\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the LwF incremental model on the VOC+KITTI test split\n",
    "# (measures performance on the new task).\n",
    "model = 'runs/train/increment_VOC_Lwf/weights/last.pt'\n",
    "\n",
    "# '&&' ensures the echo only fires if val.py exits successfully.\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/VOCKITTI.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e51de780-e1eb-4278-afca-54872a00e78f",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "e564c2e4-4493-4d9e-855b-17711a3ed21e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/VOCKITTI.yaml, weights=['runs/train/increment_VOC_plain/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 3868f729 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/test2007.cache... 4952 image\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       4952      12032      0.783      0.766      0.821      0.569\n",
      "                   car       4952       1201      0.812      0.899      0.924      0.709\n",
      "                person       4952       4528      0.846      0.806      0.881      0.583\n",
      "             aeroplane       4952        285      0.907      0.821      0.882      0.594\n",
      "               bicycle       4952        337       0.91      0.813      0.904      0.644\n",
      "                  bird       4952        459        0.8      0.732      0.796      0.516\n",
      "                  boat       4952        263      0.699      0.673      0.731      0.424\n",
      "                bottle       4952        469      0.734      0.729      0.778      0.527\n",
      "                   bus       4952        213      0.835       0.84      0.887      0.731\n",
      "                   cat       4952        358       0.86      0.813      0.854      0.619\n",
      "                 chair       4952        756      0.624      0.618      0.662      0.437\n",
      "                   cow       4952        244      0.742      0.849      0.856      0.625\n",
      "           diningtable       4952        206       0.77      0.684      0.756      0.518\n",
      "                   dog       4952        489      0.834      0.736       0.85      0.586\n",
      "                 horse       4952        348      0.855      0.844      0.897       0.63\n",
      "             motorbike       4952        325      0.828      0.787      0.878      0.573\n",
      "           pottedplant       4952        480      0.638      0.557       0.61      0.333\n",
      "                 sheep       4952        242      0.721      0.821      0.841       0.61\n",
      "                  sofa       4952        239      0.672      0.699      0.736       0.54\n",
      "                 train       4952        282      0.847      0.812      0.874      0.603\n",
      "             tvmonitor       4952        308      0.735      0.792      0.817      0.585\n",
      "Speed: 0.1ms pre-process, 1.9ms inference, 1.9ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp159\u001b[0m\n",
      "Test set val successful|ly!\n"
     ]
    }
   ],
   "source": [
    "\n",
    "model = f'runs/train/increment_VOC_plain/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/VOCKITTI.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ebd5a6e3-d20e-46ba-bead-e2f55691f5ed",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6d951a9a-6689-4438-82c7-1ca06298f267",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "21e69e8a-3625-4439-ac75-bc0ef0e9cb6d",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8121c631-2df4-4d16-b1fe-570a2b2a81bb",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1931908f-24f3-4041-a1d2-1062d270b68d",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "05684a2f-9a4a-4c6d-af76-d1fbd2bfcee1",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b0709b2d-692f-4395-b6df-587c76642d80",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d3189423-3147-4118-a9f8-0f3d2d8826ce",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4c93805d-9a66-4187-b573-644b54e49744",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "582b8f0a-f0c5-4df7-9951-37416ea73ed7",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d08b8d22-27e2-48b1-9512-9e5f63f9d0f8",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "555d959d-1609-455e-8034-50a648381112",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "793eccf4-16a0-4593-859c-f9165f277c0f",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c84e67e6-5fa6-41e0-99d2-9f41f3675989",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "id": "8c1ca5a8-a9a9-41c9-b193-868f0a69aed8",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti_increment.yaml, weights=['runs/train/exp23/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test... 2244 images, 0 bac\u001b[0m\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mNew cache created: /root/autodl-tmp/datasets/kitti/labels/test.cache\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.936      0.139      0.181      0.097\n",
      "                   car       2244       8711      0.838      0.619      0.739      0.423\n",
      "                   van       2244        861          1          0     0.0601      0.039\n",
      "                 truck       2244        333          1          0      0.153      0.096\n",
      "                  tram       2244        138          1          0    0.00531    0.00256\n",
      "                person       2244       1286      0.647      0.492      0.475      0.208\n",
      "        person_sitting       2244         89          1          0   0.000971   0.000436\n",
      "               cyclist       2244        496          1          0    0.00105   0.000383\n",
      "                  misc       2244        284          1          0     0.0109    0.00715\n",
      "Speed: 0.1ms pre-process, 1.0ms inference, 0.8ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp140\u001b[0m\n",
      "Test set val successful|ly!\n"
     ]
    }
   ],
   "source": [
    "# Fog-free training set (model trained without synthetic fog)\n",
    "model = f'runs/train/exp23/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti_increment.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "id": "239d215a-60af-4b79-98dc-83dd17cb44f9",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti_increment.yaml, weights=['runs/train/exp27/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_VOCKITTI summary: 157 layers, 7080247 parameters, 0 gradients, 16.0 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.485        0.3      0.324      0.138\n",
      "                   car       2244       8711      0.718      0.286      0.408      0.219\n",
      "                   van       2244        861      0.573      0.257       0.34      0.174\n",
      "                 truck       2244        333       0.46      0.384      0.386      0.182\n",
      "                  tram       2244        138       0.45      0.442      0.394      0.147\n",
      "                person       2244       1286      0.423      0.359      0.352      0.141\n",
      "        person_sitting       2244         89      0.267      0.303      0.259     0.0758\n",
      "               cyclist       2244        496       0.45      0.181      0.214     0.0732\n",
      "                  misc       2244        284      0.543      0.184       0.24     0.0928\n",
      "Speed: 0.1ms pre-process, 0.8ms inference, 1.0ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp141\u001b[0m\n",
      "Test set val successful|ly!\n"
     ]
    }
   ],
   "source": [
    "# Fog-free training set (model trained without synthetic fog)\n",
    "model = f'runs/train/exp27/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti_increment.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c097be19-7bce-4a56-b509-4ef558c7908f",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "id": "bffa6a1e-76e9-41ec-bac3-4beaaf51c07c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test set updated successfully!\n"
     ]
    }
   ],
   "source": [
    "update_testsets = f\" \\\n",
    "rm ../datasets/VOC/images/val/* &&\\\n",
    "cp /root/autodl-tmp/datasets/kitti/images/origin_val/* ../datasets/VOC/images/val/ && \\\n",
    "echo 'Test set updated successfully!' \\\n",
    "\" \n",
    "!{update_testsets}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "id": "2bf4aeb1-9e07-4bd5-96a8-1b59d56326cb",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test set updated successfully!\n"
     ]
    }
   ],
   "source": [
    "# Effect of EWC-based incremental training on the first dataset (without added fog)\n",
    "\n",
    "update_testsets = f\" \\\n",
    "rm ../datasets/kitti/images/test/* &&\\\n",
    "cp /root/autodl-tmp/datasets/kitti/images/origin_test/* ../datasets/kitti/images/test/ && \\\n",
    "echo 'Test set updated successfully!' \\\n",
    "\" \n",
    "!{update_testsets}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0da3adf1-e170-4d41-8f08-5a57bfc4ee65",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2765b3a7-39ba-4cad-af30-2edfbe43b278",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "id": "9349e4c3-b714-41aa-a041-656f34e59dce",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "ckpt = torch.load('./runs/train/fog_02/weights/best.pt', map_location=\"cpu\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "id": "a1da871b-b1f0-416e-8047-f81019ac94aa",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "model.24.m.0.weight torch.Size([39, 128, 1, 1])\n",
      "model.24.m.0.bias torch.Size([39])\n",
      "model.24.m.1.weight torch.Size([39, 256, 1, 1])\n",
      "model.24.m.1.bias torch.Size([39])\n",
      "model.24.m.2.weight torch.Size([39, 512, 1, 1])\n",
      "model.24.m.2.bias torch.Size([39])\n"
     ]
    }
   ],
   "source": [
    "for n, p in ckpt['model'].named_parameters():\n",
    "    if '24' in n :\n",
    "        print(n, p.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "id": "348f1a78-b22c-4220-b3f2-01217811606d",
   "metadata": {},
   "outputs": [],
   "source": [
    "ckpt2 = torch.load('./runs/train/increment_VOC_plain/weights/best.pt', map_location=\"cpu\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "id": "8e6284a2-aef0-4e92-8ffd-56a56a09c520",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "model.24.m.0.weight torch.Size([93, 128, 1, 1])\n",
      "model.24.m.0.bias torch.Size([93])\n",
      "model.24.m.1.weight torch.Size([93, 256, 1, 1])\n",
      "model.24.m.1.bias torch.Size([93])\n",
      "model.24.m.2.weight torch.Size([93, 512, 1, 1])\n",
      "model.24.m.2.bias torch.Size([93])\n"
     ]
    }
   ],
   "source": [
    "for n, p in ckpt2['model'].named_parameters():\n",
    "    if '24' in n :\n",
    "        print(n, p.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c4c78ce4-70e4-4d86-b537-24faeae2bd1c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 39 -- 3 per-anchor outputs, each of size 13 (5 + 8 classes)\n",
    "# 93 -- 3 per-anchor outputs, each of size 31 (5 + 26 classes)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "d2b0006a-0431-4c35-b279-6b72969500bc",
   "metadata": {},
   "outputs": [],
   "source": [
    "anchors = [\n",
    "   [10, 13, 16, 30, 33, 23], # P3/8\n",
    "   [30, 61, 62, 45, 59, 119], # P4/16\n",
    "   [116, 90, 156, 198, 373, 326], # P5/32\n",
    "]\n",
    "f = [17, 20, 23]\n",
    "args = [26, anchors]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "6f7ae0f5-ee00-4a01-bf5b-09ee93460cef",
   "metadata": {},
   "outputs": [
    {
     "ename": "TypeError",
     "evalue": "'str' object is not callable",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mTypeError\u001b[0m                                 Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[25], line 3\u001b[0m\n\u001b[1;32m      1\u001b[0m n \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1\u001b[39m\n\u001b[1;32m      2\u001b[0m m \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mDetect\u001b[39m\u001b[38;5;124m'\u001b[39m\n\u001b[0;32m----> 3\u001b[0m nn\u001b[38;5;241m.\u001b[39mSequential(\u001b[38;5;241m*\u001b[39m(m(\u001b[38;5;241m*\u001b[39margs) \u001b[38;5;28;01mfor\u001b[39;00m _ \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(n))) \u001b[38;5;28;01mif\u001b[39;00m n \u001b[38;5;241m>\u001b[39m \u001b[38;5;241m1\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m \u001b[43mm\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m)\u001b[49m\n",
      "\u001b[0;31mTypeError\u001b[0m: 'str' object is not callable"
     ]
    }
   ],
   "source": [
    "n = 1\n",
    "m = 'Detect'\n",
    "nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a666a4fd-4d72-4518-abbc-a8d36eb0d3e5",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "65aa771d-a7be-4673-864a-9c7589a5f8b3",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "459337b7-8ee6-4b38-a529-088d50775999",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e38cae1a-a93c-41d9-9646-467e679cc956",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2b6b59f9-0d85-4948-b2f1-7458b111901e",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b9f4f6bb-4937-404b-bfc8-f926384e0ef9",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "dc388598-e3d9-4610-8106-a7801b955d77",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_SI: \u001b[0mweights=./runs/train/fog_02/weights/best.pt, cfg=models/yolov5s_VOCKITTI.yaml, data=data/VOCKITTIval.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=100, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=increment_VOC_plain, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=True, SI_pt=./runs/train/fog_02/weights/si.pt, SI_lambda=10.0\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2882 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/db8974a1394943c69744a88dcab3bfea\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     83607  models.yolo.Detect                      [26, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_VOCKITTI summary: 214 layers, 7089751 parameters, 7089751 gradients, 16.2 GFLOPs\n",
      "\n",
      "Transferred 342/349 items from runs/train/fog_02/weights/best.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/train2007.cache... 16551 im\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/VOC/labels/val.cache... 1048 images, 0 b\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m3.99 anchors/target, 1.000 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/increment_VOC_plain3/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/increment_VOC_plain3\u001b[0m\n",
      "Starting training for 100 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/99       3.6G    0.08795    0.04699    0.07669        104        640:  \u001b[1;38;5;214mCOMET WARNING:\u001b[0m Unknown error retrieving Conda information\n",
      "       0/99       3.6G    0.06659    0.04338    0.06459         36        640: 1\n",
      "tensor([1.00370], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01974], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.906      0.104      0.136     0.0672\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/99       3.6G    0.05022    0.03611    0.04272         58        640: 1\n",
      "tensor([1.30998], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.06765], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.911      0.132      0.158     0.0731\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/99       3.6G    0.04856    0.03608    0.03077         37        640: 1\n",
      "tensor([1.03997], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.16715], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.902      0.124      0.152     0.0728\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/99       3.6G    0.04724    0.03703    0.02941         46        640: 1\n",
      "tensor([1.15950], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.32708], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.922      0.128      0.152     0.0783\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/99       3.6G    0.04616    0.03688    0.02729         39        640: 1\n",
      "tensor([1.20518], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.38221], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.913      0.136      0.163     0.0848\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/99       3.6G    0.04524    0.03667    0.02605         28        640: 1\n",
      "tensor([1.08640], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.41225], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.921      0.141      0.164     0.0848\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/99       3.6G    0.04478    0.03673    0.02537         39        640: 1\n",
      "tensor([1.17855], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.42591], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.92      0.137      0.158     0.0779\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/99       3.6G    0.04447    0.03662    0.02571         98        640:  ^C\n",
      "       7/99       3.6G    0.04447    0.03662    0.02571         98        640:  \n"
     ]
    }
   ],
   "source": [
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_SI.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_VOCKITTI.yaml \\\n",
    "--data data/VOCKITTIval.yaml \\\n",
    "--epochs 50 \\\n",
    "--weights ./runs/train/fog_02/weights/best.pt \\\n",
    "--SI_enable \\\n",
    "--SI_pt ./runs/train/fog_02/weights/si.pt \\\n",
    "--SI_lambda 1e1 \\\n",
    "\"\"\"\n",
    "!{command}\n",
    "# --weights ./runs/train/exp3/weights/best.pt \\\n",
    "# 1.0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "928e55fa-1eb7-4952-8bd3-486270a5a935",
   "metadata": {},
   "outputs": [],
   "source": [
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_SI.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_VOCKITTI.yaml \\\n",
    "--data data/VOCKITTI.yaml \\\n",
    "--epochs 50 \\\n",
    "--weights ./runs/train/fog_02/weights/best.pt \\\n",
    "--SI_enable \\\n",
    "--SI_pt ./runs/train/fog_02/weights/si.pt \\\n",
    "--SI_lambda 1e1 \\\n",
    "--name increment_VOC_SI \\\n",
    "\"\"\"\n",
    "!{command}\n",
    "# --weights ./runs/train/exp3/weights/best.pt \\\n",
    "# 1.0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7d449b48-0dbe-405d-836e-06dfe2619387",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "02977d69-5e12-421b-b73c-05422bb3cd40",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2f613e3c-121a-4e41-9a45-399bbaa488f1",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "raw",
   "id": "9b629d28-e04a-468f-8f68-411a909f8d26",
   "metadata": {},
   "source": [
    "Test weights path\n",
    "\n",
    "runs/train/exp/weights/best.pt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "0a848d08-fb7f-462f-9dda-a150169d2a75",
   "metadata": {},
   "outputs": [],
   "source": [
    "freeze = [0, 1]  # NOTE(review): only layers 0-1 are frozen here, NOT the full backbone (P1-P5) as the original note claimed\n",
    "freeze_layer = ' '.join([str(i) for i in range(25) if i in freeze]) # yields '0 1' -- only the indices present in freeze, not 0-23\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "4e97523a-2db4-4a9e-8d39-bed14cd72728",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_SI: \u001b[0mweights=./runs/train/fog_02/weights/best.pt, cfg=models/yolov5s_kitti.yaml, data=data/kitti.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=100, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0, 1], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0\n",
      "Command 'git fetch ultralytics' timed out after 5 seconds\n",
      "YOLOv5 🚀 bda8da72 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/87a22d50bdbb40579a66b747bec64505\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo.Detect                      [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_kitti summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "Transferred 348/349 items from runs/train/fog_02/weights/best.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "freezing model.0.conv.weight\n",
      "freezing model.0.bn.weight\n",
      "freezing model.0.bn.bias\n",
      "freezing model.1.conv.weight\n",
      "freezing model.1.bn.weight\n",
      "freezing model.1.bn.bias\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/train.cache... 4189 image\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/val.cache... 1048 images, 0\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.81 anchors/target, 0.999 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/exp84/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/exp84\u001b[0m\n",
      "Starting training for 100 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/99      2.86G    0.03515      0.034   0.006785        128        640: 1\n",
      "tensor([0.86262], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.712      0.441      0.491      0.281\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/99      2.86G    0.03299    0.02905   0.004242        133        640: 1\n",
      "tensor([0.90196], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.836      0.568      0.674       0.39\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/99      2.86G    0.03597    0.03023   0.004455        173        640:  fatal: unable to access 'https://github.com/ultralytics/yolov5/': Failed to connect to github.com port 443 after 129605 ms: Connection timed out\n",
      "       2/99      2.86G     0.0363    0.03046   0.004678        131        640: 1\n",
      "tensor([0.93468], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.754      0.547      0.619      0.355\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/99      2.86G    0.03684    0.03155   0.005247        108        640: 1\n",
      "tensor([0.83491], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.692      0.553      0.612      0.336\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/99      2.86G    0.03652    0.03123   0.005326        156        640: 1\n",
      "tensor([0.94976], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.775      0.578      0.665      0.374\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/99      2.86G    0.03598      0.031   0.005121        123        640: 1\n",
      "tensor([0.82424], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.755      0.551      0.624      0.334\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/99      2.86G    0.03504    0.03018   0.004703        174        640: 1\n",
      "tensor([0.99024], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.826      0.593      0.687      0.385\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/99      2.86G    0.03457    0.02962   0.004679        166        640: 1\n",
      "tensor([1.05898], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.871      0.571      0.683      0.371\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/99      2.86G     0.0343    0.02975   0.004337        152        640: 1\n",
      "tensor([0.88988], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.81      0.582      0.669      0.378\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/99      2.86G    0.03383    0.02958   0.004483        136        640: 1\n",
      "tensor([0.84589], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675        0.8      0.595      0.674      0.388\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/99      2.86G    0.03352    0.02933   0.004223        134        640: 1\n",
      "tensor([0.84516], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.79      0.585      0.683      0.399\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/99      2.86G    0.03324    0.02868    0.00417        182        640: 1\n",
      "tensor([0.91852], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.753      0.641      0.701      0.403\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/99      2.86G    0.03318    0.02904   0.003975        128        640: 1\n",
      "tensor([0.75111], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.844      0.632      0.744       0.41\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/99      2.86G    0.03269    0.02839    0.00391        112        640: 1\n",
      "tensor([0.87340], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.79       0.62      0.697       0.38\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/99      2.86G    0.03259    0.02858   0.003927        151        640: 1\n",
      "tensor([0.85093], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.839      0.631      0.721      0.422\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/99      2.86G    0.03255    0.02858   0.003811        132        640: 1\n",
      "tensor([0.82656], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.823      0.636      0.726      0.428\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/99      2.86G    0.03227    0.02819   0.003849        131        640: 1\n",
      "tensor([0.81171], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.839      0.637      0.719      0.417\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/99      2.86G    0.03243    0.02811   0.003813        159        640: 1\n",
      "tensor([0.91737], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.823      0.657      0.748      0.445\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/99      2.86G    0.03188    0.02776   0.003626        125        640: 1\n",
      "tensor([0.73502], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.869      0.633      0.728      0.417\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/99      2.86G    0.03192    0.02813   0.003667         88        640: 1\n",
      "tensor([0.69195], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.84      0.642      0.723      0.431\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/99      2.86G    0.03147    0.02727   0.003614        137        640: 1\n",
      "tensor([0.90609], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.809      0.668       0.74      0.443\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/99      2.86G     0.0312    0.02774   0.003414        166        640: 1\n",
      "tensor([0.91782], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.86      0.649      0.746      0.453\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/99      2.86G     0.0312    0.02748   0.003511        161        640: 1\n",
      "tensor([0.85312], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.827      0.659      0.745      0.429\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/99      2.86G    0.03117    0.02711   0.003423        118        640: 1\n",
      "tensor([0.74913], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.851      0.618      0.738       0.45\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/99      2.86G    0.03063    0.02696   0.003339        151        640: 1\n",
      "tensor([0.81916], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.81      0.609      0.697      0.411\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/99      2.86G    0.03067     0.0272   0.003321        133        640: 1\n",
      "tensor([0.79528], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.904      0.634      0.742      0.441\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/99      2.86G    0.03051    0.02693   0.003287        154        640: 1\n",
      "tensor([0.91239], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.853      0.657      0.745      0.453\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/99      2.86G     0.0306    0.02715   0.003387        122        640: 1\n",
      "tensor([0.73111], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.839      0.624      0.714      0.427\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/99      2.86G    0.02977    0.02631   0.003119        127        640: 1\n",
      "tensor([0.70656], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.846        0.6      0.709      0.403\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/99      2.86G    0.02981    0.02558   0.003081        127        640: 1\n",
      "tensor([0.66780], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.879      0.647      0.741      0.446\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/99      2.86G    0.02989    0.02619   0.003189        122        640: 1\n",
      "tensor([0.79201], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.865      0.613      0.709      0.434\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/99      2.86G    0.02988    0.02648   0.003095        146        640: 1\n",
      "tensor([0.82406], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.854       0.61      0.714      0.424\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/99      2.86G    0.02989      0.026   0.003069        202        640: 1\n",
      "tensor([0.88637], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.844       0.59      0.699      0.418\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/99      2.86G     0.0294    0.02577   0.003128         94        640: 1\n",
      "tensor([0.61030], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.827      0.614      0.703      0.417\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/99      2.86G     0.0293    0.02571    0.00298        152        640: 1\n",
      "tensor([0.82126], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.827      0.625      0.717      0.426\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/99      2.86G    0.02941    0.02577   0.003065        123        640: 1\n",
      "tensor([0.67533], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.813      0.543      0.645      0.391\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/99      2.86G    0.02911    0.02574   0.003031        162        640: 1\n",
      "tensor([0.75613], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.711      0.609      0.671      0.404\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/99      2.86G    0.02912    0.02593   0.003055        161        640: 1\n",
      "tensor([0.75184], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.865      0.616      0.726      0.434\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/99      2.86G    0.02896    0.02572   0.002903        122        640: 1\n",
      "tensor([0.68107], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.841      0.624       0.73      0.433\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/99      2.86G     0.0287    0.02539   0.003074        126        640: 1\n",
      "tensor([0.66264], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.793      0.609      0.687      0.408\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/99      2.86G    0.02867    0.02541     0.0029         90        640: 1\n",
      "tensor([0.59628], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.84      0.602        0.7       0.42\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/99      2.86G    0.02847    0.02525   0.002808        201        640:  ^C\n",
      "      42/99      2.86G    0.02847    0.02525   0.002808        201        640:  \n"
     ]
    }
   ],
   "source": [
     "# Launch SI (synaptic-intelligence) continual-learning training on KITTI,\n",
     "# fine-tuning from the foggy-domain checkpoint runs/train/fog_02.\n",
     "# NOTE(review): `freeze_layer` must be defined in an earlier cell -- this\n",
     "# cell fails under Restart & Run All otherwise; confirm where it is set.\n",
     "command = f\"\"\"\n",
     "env COMET_LOG_PER_CLASS_METRICS=true python train_SI.py \\\n",
     "--img 640 \\\n",
     "--bbox_interval 1 \\\n",
     "--cfg models/yolov5s_kitti.yaml \\\n",
     "--data data/kitti.yaml \\\n",
     "--epochs 100 \\\n",
     "--weights ./runs/train/fog_02/weights/best.pt \\\n",
     "--freeze {freeze_layer} \\\n",
     "\"\"\"\n",
     "!{command}\n",
     "# Alternative flags kept for reference (EWC-regularized variant):\n",
     "# --weights ./runs/train/exp3/weights/best.pt \\\n",
     "# 1.0\n",
     "# --ewc_pt runs/train/fog_02/weights/fisher.pt \\\n",
     "# --ewc_lambda 1e-3 \\\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d7facbe8-bc19-4974-bdc8-dda7d687509e",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8ac32abb-2560-4e55-9261-ef8b53382ff0",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "8aa4cf13-b8ae-4c69-b138-779c20fff993",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "\n",
    "from EWC_module.fisher import cal_fisher"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "fd4b4083-9fea-49fc-a34b-0138259939b0",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo.Detect                      [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_kitti summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1mAutoBatch: \u001b[0mComputing optimal batch size for --imgsz 640\n",
      "\u001b[34m\u001b[1mAutoBatch: \u001b[0mCUDA:0 (NVIDIA GeForce RTX 3080) 19.71G total, 0.22G reserved, 0.05G allocated, 19.43G free\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "      Params      GFLOPs  GPU_mem (GB)  forward (ms) backward (ms)                   input                  output\n",
      "     7041205       16.01         0.442         22.88         67.66        (1, 3, 640, 640)                    list\n",
      "     7041205       32.01         0.623         18.58         39.95        (2, 3, 640, 640)                    list\n",
      "     7041205       64.02         0.921         18.12          39.1        (4, 3, 640, 640)                    list\n",
      "     7041205         128         1.707         19.42         41.45        (8, 3, 640, 640)                    list\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mAutoBatch: \u001b[0mUsing batch-size 87 for CUDA:0 15.66G/19.71G (79%) ✅\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "     7041205       256.1         3.032         26.65         47.66       (16, 3, 640, 640)                    list\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning ../datasets/kitti/labels/train.cache... 4189 images, 0 backgrounds, 0 corrupt: 100%|██████████| 4189/4189 [00:00<?, ?it/s]\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "fisher context saved at runs/train/exp/weights/fisher.pt\n"
     ]
    }
   ],
   "source": [
    "cal_fisher('runs/train/exp/weights/best.pt')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "00ae91ef-cf86-42d6-8ce2-38edbeb3bf0e",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6f3addba-ca64-416d-8ff8-f1e9efc33852",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "bc72d69c-48cf-4165-98e2-4c27b09e73c6",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo.Detect                      [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_kitti summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1mAutoBatch: \u001b[0mComputing optimal batch size for --imgsz 640\n",
      "\u001b[34m\u001b[1mAutoBatch: \u001b[0mCUDA:0 (NVIDIA GeForce RTX 3080) 19.71G total, 0.22G reserved, 0.05G allocated, 19.43G free\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "      Params      GFLOPs  GPU_mem (GB)  forward (ms) backward (ms)                   input                  output\n",
      "     7041205       16.01         0.442          15.3         57.39        (1, 3, 640, 640)                    list\n",
      "     7041205       32.01         0.623         11.03         35.91        (2, 3, 640, 640)                    list\n",
      "     7041205       64.02         0.921         10.46         35.32        (4, 3, 640, 640)                    list\n",
      "     7041205         128         1.707         16.86         38.69        (8, 3, 640, 640)                    list\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mAutoBatch: \u001b[0mUsing batch-size 87 for CUDA:0 15.66G/19.71G (79%) ✅\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "     7041205       256.1         3.032         28.84         50.72       (16, 3, 640, 640)                    list\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning ../datasets/kitti/labels/train.cache... 4189 images, 0 backgrounds, 0 corrupt: 100%|██████████| 4189/4189 [00:00<?, ?it/s]\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n"
     ]
    }
   ],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "dd3f0588-bd28-413c-a6bd-f1cc092ba189",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6bfa1ffd-5990-4e15-8fc4-b7db317452ab",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "b52150aa-5194-44e4-8c74-e489d50b7965",
   "metadata": {},
   "outputs": [],
   "source": [
    "def ewc_loss(weights, fisher_matrix, old_dataset, ewc_lambda=1.0):\n",
    "    \"\"\"Run one EWC-regularized training pass over the previous task's train set.\n",
    "\n",
    "    Args:\n",
    "        weights: path to a YOLOv5 checkpoint (.pt) whose 'opt' dict holds the\n",
    "            training options of the previous run.\n",
    "        fisher_matrix: dict of name -> tensor with Fisher information values;\n",
    "            also updated in place with a running squared-gradient estimate.\n",
    "        old_dataset: dict of name -> tensor holding the old task's optimal\n",
    "            parameters (theta*). NOTE(review): despite the name, this holds\n",
    "            parameters, not a dataset.\n",
    "        ewc_lambda: weight of the EWC penalty added to the detection loss\n",
    "            (default 1.0 keeps prior callers' positional signature working).\n",
    "    \"\"\"\n",
    "    ckpt = torch.load(weights, map_location=\"cpu\")  # load checkpoint to CPU to avoid CUDA memory leak\n",
    "    opt = ckpt['opt']\n",
    "\n",
    "    data, hyp, resume, cfg, single_cls, workers = (\n",
    "        opt['data'],\n",
    "        opt['hyp'],\n",
    "        opt['resume'],\n",
    "        opt['cfg'],\n",
    "        opt['single_cls'],\n",
    "        opt['workers']\n",
    "    )\n",
    "    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "\n",
    "    # Model config: read the number of classes.\n",
    "    with open(cfg, \"r\") as file:\n",
    "        config = yaml.safe_load(file)\n",
    "        nc = config.get('nc', None)\n",
    "\n",
    "    # Data config: we forward-pass the old training set again, so recover the\n",
    "    # train path that was used for the previous run.\n",
    "    with open(data, \"r\") as file:\n",
    "        config = yaml.safe_load(file)\n",
    "        dataset_root = config.get('path', None)\n",
    "        train_relative_path = config.get('train', None)\n",
    "        train_path = os.path.join(dataset_root, train_relative_path)\n",
    "\n",
    "    model = Model(cfg or ckpt[\"model\"].yaml, ch=3, nc=nc, anchors=hyp.get(\"anchors\")).to(device)  # create\n",
    "    exclude = [\"anchor\"] if (cfg or hyp.get(\"anchors\")) and not resume else []  # exclude keys\n",
    "    csd = ckpt[\"model\"].float().state_dict()  # checkpoint state_dict as FP32\n",
    "    csd = intersect_dicts(csd, model.state_dict(), exclude=exclude)  # intersect\n",
    "    model.load_state_dict(csd, strict=False)  # load\n",
    "\n",
    "    gs = max(int(model.stride.max()), 32)  # grid size (max stride)\n",
    "    imgsz = check_img_size(opt['imgsz'], gs, floor=gs * 2)  # verify imgsz is gs-multiple\n",
    "    amp = check_amp(model)\n",
    "    batch_size = check_train_batch_size(model, imgsz, amp)\n",
    "\n",
    "    train_loader, dataset = create_dataloader(\n",
    "        train_path,\n",
    "        imgsz,\n",
    "        batch_size // WORLD_SIZE,\n",
    "        gs,\n",
    "        single_cls,\n",
    "        hyp=hyp,\n",
    "        augment=True,\n",
    "        cache=None if opt['cache'] == \"val\" else opt['cache'],\n",
    "        rect=opt['rect'],\n",
    "        rank=LOCAL_RANK,\n",
    "        # workers=workers,  # deliberately single-worker: multi-process loading is not needed here\n",
    "        image_weights=opt['image_weights'],\n",
    "        quad=opt['quad'],\n",
    "        prefix=colorstr(\"train: \"),\n",
    "        shuffle=True,\n",
    "        seed=opt['seed'],\n",
    "    )\n",
    "    nb = len(train_loader)  # number of batches, used to average the Fisher estimate\n",
    "\n",
    "    model.nc = nc\n",
    "    model.hyp = hyp\n",
    "    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc  # attach class weights\n",
    "\n",
    "    # No validation split here, so no extra worker threads are needed.\n",
    "    scaler = torch.cuda.amp.GradScaler(enabled=amp)\n",
    "    compute_loss = ComputeLoss(model)  # init loss class\n",
    "    optimizer = smart_optimizer(model, opt['optimizer'], hyp[\"lr0\"], hyp[\"momentum\"], hyp[\"weight_decay\"])\n",
    "\n",
    "    # NOTE(review): the original left model.train() commented out; kept that way\n",
    "    # to preserve BN/dropout behavior of the previous runs - confirm intent.\n",
    "    for i, (imgs, targets, paths, _) in enumerate(train_loader):\n",
    "        imgs = imgs.to(device, non_blocking=True).float() / 255  # uint8 to float32, 0-255 to 0.0-1.0\n",
    "\n",
    "        if opt['multi_scale']:\n",
    "            sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5) + gs) // gs * gs  # size\n",
    "            sf = sz / max(imgs.shape[2:])  # scale factor\n",
    "            if sf != 1:\n",
    "                ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)\n",
    "                imgs = nn.functional.interpolate(imgs, size=ns, mode=\"bilinear\", align_corners=False)\n",
    "\n",
    "        # EWC quadratic penalty: sum_k F_k * (theta_k - theta*_k)^2.\n",
    "        # Renamed from 'ewc_loss', which shadowed this function's own name;\n",
    "        # the per-iteration debug print was removed (it was O(n^2) in params).\n",
    "        penalty = torch.zeros((), device=device)\n",
    "        for name, param in model.model.named_parameters():\n",
    "            penalty = penalty + (fisher_matrix[name] * (param - old_dataset[name]) ** 2).sum()\n",
    "\n",
    "        with torch.cuda.amp.autocast(amp):\n",
    "            pred = model(imgs)  # forward\n",
    "            loss, loss_items = compute_loss(pred, targets.to(device))  # loss scaled by batch_size\n",
    "            if opt['quad']:\n",
    "                loss *= 4.0\n",
    "        # BUG FIX: the EWC penalty was computed but never added to the loss,\n",
    "        # so the regularizer previously had no effect on training.\n",
    "        loss = loss + ewc_lambda * penalty\n",
    "\n",
    "        optimizer.zero_grad()  # BUG FIX: clear grads before backward, not after update\n",
    "        scaler.scale(loss).backward()\n",
    "\n",
    "        # Unscale first so the Fisher estimate is in true gradient units\n",
    "        # (the original accumulated AMP-scaled gradients).\n",
    "        scaler.unscale_(optimizer)\n",
    "        for name, param in model.model.named_parameters():\n",
    "            if param.grad is not None:  # guard frozen / unused parameters\n",
    "                fisher_matrix[name].data += param.grad.data ** 2 / nb\n",
    "\n",
    "        # BUG FIX: the original called unscale_/update without step(), so the\n",
    "        # optimizer never applied any parameter update.\n",
    "        scaler.step(optimizer)\n",
    "        scaler.update()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "103a0988-29aa-422e-8351-700b5080a76e",
   "metadata": {},
   "outputs": [],
   "source": [
    "ewc_loss('runs/train/exp/weights/best.pt', fisher_matrix, old_dataset)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f71a5833-7a0c-4887-9fbe-38cec171db20",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "46268a7c-9ac4-413b-8fb8-8dc471ceeac0",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "d26cc06f-ad46-43c6-b237-be38d58d5381",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti.yaml, weights=['runs/train/fog_0.6/weights/best.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 ba3ea0ef Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.875       0.71      0.808      0.531\n",
      "                   Car       2244       8711      0.908      0.857      0.924      0.698\n",
      "                   Van       2244        861      0.862      0.715      0.817      0.596\n",
      "                 Truck       2244        333      0.938      0.871      0.926      0.696\n",
      "                  Tram       2244        138      0.956      0.785       0.91      0.597\n",
      "            Pedestrian       2244       1286       0.83      0.644      0.738      0.386\n",
      "        Person_sitting       2244         89      0.667      0.652      0.696      0.363\n",
      "               Cyclist       2244        496      0.922      0.521      0.713      0.419\n",
      "                  Misc       2244        284      0.919      0.638      0.739      0.497\n",
      "Speed: 0.0ms pre-process, 0.6ms inference, 1.2ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp5\u001b[0m\n",
      "Test set val successfully!\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the fog-0.6 model on the current test set.\n",
    "model = f'runs/train/fog_0.6/weights/best.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "# EWC strength 0.5 -- NOTE(review): confirm this note applies to this run\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "e9d01d2e-34eb-4569-975d-687bb0232e84",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test set updated successfully!\n"
     ]
    }
   ],
   "source": [
    "# Result of EWC incremental training on the first (fog-free) dataset.\n",
    "# NOTE(review): the copy below actually installs fog-strength-0.6 images as\n",
    "# the test set - confirm the comment above matches the intended experiment.\n",
    "\n",
    "update_testsets = f\" \\\n",
    "rm ../datasets/kitti/images/test/* &&\\\n",
    "cp /root/autodl-tmp/datasets/fogged/fogged_strength0.6/* ../datasets/kitti/images/test/ && \\\n",
    "echo 'Test set updated successfully!' \\\n",
    "\" \n",
    "!{update_testsets}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "ea07b248-185a-41be-bb99-f6222e5ac6bc",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti.yaml, weights=['runs/train/fog_02/weights/best.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 ba3ea0ef Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test... 2244 images, 0 bac\u001b[0m\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mNew cache created: /root/autodl-tmp/datasets/kitti/labels/test.cache\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.743      0.492      0.564      0.344\n",
      "                   Car       2244       8711       0.87      0.673      0.804      0.543\n",
      "                   Van       2244        861      0.708      0.555      0.643      0.424\n",
      "                 Truck       2244        333      0.865      0.583      0.667      0.434\n",
      "                  Tram       2244        138      0.589       0.63      0.646      0.364\n",
      "            Pedestrian       2244       1286      0.704      0.487      0.573      0.311\n",
      "        Person_sitting       2244         89      0.798      0.178      0.233      0.108\n",
      "               Cyclist       2244        496      0.748      0.476       0.55      0.318\n",
      "                  Misc       2244        284      0.666      0.352      0.394      0.252\n",
      "Speed: 0.1ms pre-process, 0.9ms inference, 0.7ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp6\u001b[0m\n",
      "Test set val successfully!\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the fog_02 model on the current test set.\n",
    "model = f'runs/train/fog_02/weights/best.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "# EWC strength 0.5 -- NOTE(review): confirm this note applies to this run\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "0fed5c4b-165f-47c9-b39a-8838bc5701ba",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test set updated successfully!\n"
     ]
    }
   ],
   "source": [
    "# Restore the KITTI test set from the original (unmodified) testing images.\n",
    "update_testsets = f\" \\\n",
    "rm ../datasets/kitti/images/test/* &&\\\n",
    "cp /root/autodl-tmp/testing/image_2/* ../datasets/kitti/images/test/ && \\\n",
    "echo 'Test set updated successfully!' \\\n",
    "\" \n",
    "!{update_testsets}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "28dd29ef-df5a-46b5-8c5c-4b59d7170942",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b9798252-4f33-408a-8c9d-34ac9f02d886",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0d0278a5-ec0e-4b4c-97ea-4a906b58a557",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "de96ef6b-f8dc-4e05-b0ef-b245c90a0fcf",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test set updated successfully!\n"
     ]
    }
   ],
   "source": [
    "# Next: switch to the fog-strength-0.6 test set.\n",
    "update_testsets = f\" \\\n",
    "rm ../datasets/kitti/images/test/* &&\\\n",
    "cp /root/autodl-tmp/datasets/fogged/fogged_strength0.6/* ../datasets/kitti/images/test/ && \\\n",
    "echo 'Test set updated successfully!' \\\n",
    "\" \n",
    "!{update_testsets}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "b057eaed-d871-49a5-a377-e4f2bea7586b",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti.yaml, weights=['runs/train/fog_1.0/weights/best.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 ba3ea0ef Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.851      0.656      0.751      0.472\n",
      "                   Car       2244       8711      0.898      0.818      0.902      0.651\n",
      "                   Van       2244        861      0.799      0.649      0.742      0.514\n",
      "                 Truck       2244        333      0.941      0.778      0.875      0.619\n",
      "                  Tram       2244        138      0.907      0.566      0.723      0.408\n",
      "            Pedestrian       2244       1286      0.819      0.648      0.724      0.384\n",
      "        Person_sitting       2244         89      0.601      0.596      0.638      0.346\n",
      "               Cyclist       2244        496      0.941      0.556      0.682      0.404\n",
      "                  Misc       2244        284      0.901      0.641      0.724      0.448\n",
      "Speed: 0.0ms pre-process, 0.8ms inference, 0.8ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp40\u001b[0m\n",
      "Test set val successfully!\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the fog-1.0 model on the current test set.\n",
    "model = f'runs/train/fog_1.0/weights/best.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "# EWC strength 0.5 -- NOTE(review): confirm this note applies to this run\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3b65841b-26be-4728-bd9c-9530cc3d89af",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7e202f69-965d-40a6-a9ae-ddc44a54d844",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "27781bb5-ead9-4e7c-8dbb-e5e2c07b3e04",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1451169b-fdad-464b-a9c6-db179ce68613",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "ab5040bf-7fa6-4e2f-9c42-3d47d10943ec",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Replace the training data.\n",
    "from fog_test.for_different_strength import mix_dataset\n",
    "\n",
    "origin_ratio = {\n",
    "    '0.6':0,\n",
    "}\n",
    "# First, initialize the dataset to train a model with no fogged data\n",
    "# (ratio 0 for strength 0.6). Original comment had a typo: '家务' (housework)\n",
    "# should be '加雾' (fogged).\n",
    "mix_dataset(fogged_folder = '../datasets/fogged/', \n",
    "            ratio = origin_ratio,\n",
    "            train_folder = '../datasets/kitti/images/origin_train', \n",
    "            out_folder = '../datasets/kitti/images/train'\n",
    "               )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "eba1c0e1-9973-4d5b-8fb8-7ea7e6bc7020",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_SI: \u001b[0mweights=yolov5s.pt, cfg=models/yolov5s_kitti.yaml, data=data/kitti.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=1, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=True, SI_pt=None, SI_lambda=10.0\n",
      "Command 'git fetch ultralytics' timed out after 5 seconds\n",
      "YOLOv5 🚀 ba3ea0ef Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/3a04745721234ce49207fec67e7148ac\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo.Detect                      [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_kitti summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "Transferred 342/349 items from yolov5s.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/train... 4189 images, 0 b\u001b[0m\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /root/autodl-tmp/datasets/kitti/labels/train.cache\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/val.cache... 1048 images, 0\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.81 anchors/target, 0.999 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/exp11/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/exp11\u001b[0m\n",
      "Starting training for 1 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "        0/0       3.7G        nan        nan        nan        180        640:  /root/autodl-tmp/yolo_incremental_learning/utils/plots.py:174: RuntimeWarning: invalid value encountered in cast\n",
      "  mosaic[y : y + h, x : x + w, :] = im\n",
      "        0/0       3.7G        nan        nan        nan        124        640:  /root/autodl-tmp/yolo_incremental_learning/utils/plots.py:174: RuntimeWarning: invalid value encountered in cast\n",
      "  mosaic[y : y + h, x : x + w, :] = im\n",
      "        0/0       3.7G        nan        nan        nan        160        640:  \n",
      "Traceback (most recent call last):\n",
      "  File \"/root/autodl-tmp/yolo_incremental_learning/train_SI.py\", line 1127, in <module>\n",
      "    main(opt)\n",
      "  File \"/root/autodl-tmp/yolo_incremental_learning/train_SI.py\", line 829, in main\n",
      "    train(opt.hyp, opt, device, callbacks)\n",
      "  File \"/root/autodl-tmp/yolo_incremental_learning/train_SI.py\", line 505, in train\n",
      "    scaler.scale(loss).backward()\n",
      "  File \"/root/miniconda3/lib/python3.10/site-packages/comet_ml/monkey_patching.py\", line 305, in wrapper\n",
      "    raise exception_raised\n",
      "  File \"/root/miniconda3/lib/python3.10/site-packages/comet_ml/monkey_patching.py\", line 276, in wrapper\n",
      "    return_value = original(*args, **kwargs)\n",
      "  File \"/root/miniconda3/lib/python3.10/site-packages/torch/_tensor.py\", line 492, in backward\n",
      "    torch.autograd.backward(\n",
      "  File \"/root/miniconda3/lib/python3.10/site-packages/torch/autograd/__init__.py\", line 251, in backward\n",
      "    Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass\n",
      "RuntimeError: cuDNN error: CUDNN_STATUS_MAPPING_ERROR\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m The process of logging environment details (conda environment, git patch) is underway. Please be patient as this may take some time.\n"
     ]
    }
   ],
   "source": [
    "'''\n",
    "100epoch 的1.0\n",
    "0.9和0.6过于相似，没有太大区别所以再训个1.0的。\n",
    "由于将来可能要做从雾更浓忘雾不浓的反向增量，所以这个还是记录一下SI吧\n",
    "'''\n",
    "# Translation of the note above: train a model on fog strength 1.0; 0.9 and\n",
    "# 0.6 were too similar to tell apart. Since a reverse increment (denser fog\n",
    "# -> lighter fog) may be needed later, record the SI state as well.\n",
    "# NOTE(review): the note says 100 epochs, but the command uses --epochs 1 -\n",
    "# confirm which is intended.\n",
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_SI.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_kitti.yaml \\\n",
    "--data data/kitti.yaml \\\n",
    "--epochs 1 \\\n",
    "--SI_enable \\\n",
    "\"\"\"\n",
    "!{command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "a3f7dc90-bdfb-41c2-bde0-9b316233cf6a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti.yaml, weights=['runs/train/fog_02/weights/best.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 ba3ea0ef Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   /root/autodl-tmp/yolo_incremental_learning/utils/plots.py:174: RuntimeWarning: invalid value encountered in cast\n",
      "  mosaic[y : y + h, x : x + w, :] = im\n",
      "/root/autodl-tmp/yolo_incremental_learning/utils/plots.py:174: RuntimeWarning: invalid value encountered in cast\n",
      "  mosaic[y : y + h, x : x + w, :] = im\n",
      "                 Class     Images  Instances          P          R      mAP50   /root/autodl-tmp/yolo_incremental_learning/utils/plots.py:174: RuntimeWarning: invalid value encountered in cast\n",
      "  mosaic[y : y + h, x : x + w, :] = im\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198   0.000841   5.74e-05   0.000411   0.000267\n",
      "                   Car       2244       8711    0.00673   0.000459    0.00329    0.00214\n",
      "                   Van       2244        861          0          0          0          0\n",
      "                 Truck       2244        333          0          0          0          0\n",
      "                  Tram       2244        138          0          0          0          0\n",
      "            Pedestrian       2244       1286          0          0          0          0\n",
      "        Person_sitting       2244         89          0          0          0          0\n",
      "               Cyclist       2244        496          0          0          0          0\n",
      "                  Misc       2244        284          0          0          0          0\n",
      "Speed: 0.1ms pre-process, 0.7ms inference, 0.3ms NMS per image at shape (32, 3, 640, 640)\n",
      "^C\n",
      "WARNING ⚠️ ConfusionMatrix plot failure: \n",
      "Results saved to \u001b[1mruns/val/exp46\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "'''\n",
    "开始测baseline\n",
    "首先是基于无雾数据集的测试集\n",
    "'''\n",
    "# Baseline checkpoint: weights trained on the fog-free training set\n",
    "model = 'runs/train/fog_02/weights/best.pt'\n",
    "\n",
    "# Build the shell command; {model} is interpolated by the f-string below,\n",
    "# and the trailing && only echoes on successful validation.\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "# ewc strength 0.5\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d14ef9a2-9347-44f3-969f-9d0574e2e002",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8d829aa6-0dd3-4bea-b047-e38311e01f9f",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "738a1ed0-843c-4cc9-9cfe-ade4f8249f8d",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "56d469e5-f930-44f6-b47e-04cc2f47cd75",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_SI: \u001b[0mweights=./runs/train/fog_02/weights/best.pt, cfg=models/yolov5s_kitti.yaml, data=data/kitti.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=100, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2882 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 68de71e8 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/5375f11f8adc489bb79f91715fdbfaf6\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo.Detect                      [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_kitti summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "Transferred 348/349 items from runs/train/fog_02/weights/best.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/train... 4189 images, 0 b\u001b[0m\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /root/autodl-tmp/datasets/kitti/labels/train.cache\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/val.cache... 1048 images, 0\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.81 anchors/target, 0.999 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/exp59/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/exp59\u001b[0m\n",
      "Starting training for 100 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/99      3.65G    0.03486    0.03428   0.006893        128        640: 1\n",
      "tensor([0.87622], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.718       0.41      0.479      0.278\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/99      3.65G     0.0337    0.03052   0.005239        133        640: 1\n",
      "tensor([0.93715], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.804      0.545      0.647      0.381\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/99      3.65G     0.0365    0.03302   0.006499        131        640: 1\n",
      "tensor([1.01537], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.667      0.288      0.334       0.18\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/99      3.65G    0.03839    0.03477   0.007357        108        640: 1\n",
      "tensor([0.88693], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.59      0.369      0.399      0.213\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/99      3.65G    0.03825    0.03359   0.006356        156        640: 1\n",
      "tensor([0.97841], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.69      0.514      0.587      0.322\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/99      3.65G    0.03721    0.03247   0.005778        123        640: 1\n",
      "tensor([0.90661], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.785      0.521      0.626      0.356\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/99      3.65G    0.03643    0.03131   0.005096        174        640: 1\n",
      "tensor([1.05071], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.73       0.59      0.639      0.345\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/99      3.65G    0.03587    0.03066   0.005019        166        640: 1\n",
      "tensor([1.10978], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.874      0.597      0.707      0.406\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/99      3.65G    0.03511    0.03063   0.004694        152        640: 1\n",
      "tensor([0.92849], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.768      0.609      0.674      0.379\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/99      3.65G    0.03493     0.0305   0.004777        136        640: 1\n",
      "tensor([0.88823], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.767      0.601      0.668      0.365\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/99      3.65G    0.03465    0.03022   0.004462        134        640: 1\n",
      "tensor([0.86391], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.827      0.645      0.726      0.417\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/99      3.65G    0.03424    0.02959   0.004339        182        640: 1\n",
      "tensor([0.91177], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.81      0.564      0.661      0.371\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/99      3.65G    0.03387    0.02989   0.004222        128        640: 1\n",
      "tensor([0.79485], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.763      0.622      0.694      0.408\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/99      3.65G    0.03357    0.02902   0.004022        112        640: 1\n",
      "tensor([0.84985], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.731      0.588      0.658      0.353\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/99      3.65G    0.03321    0.02921   0.004097        151        640: 1\n",
      "tensor([0.83255], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.864      0.588      0.707      0.411\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/99      3.65G    0.03275    0.02894   0.003886        132        640: 1\n",
      "tensor([0.79535], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.829      0.606      0.686      0.391\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/99      3.65G    0.03288    0.02868    0.00387        131        640: 1\n",
      "tensor([0.78754], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.794      0.589      0.694      0.405\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/99      3.65G     0.0328    0.02859   0.003908        159        640: 1\n",
      "tensor([0.92287], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.856      0.607      0.711      0.419\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/99      3.65G    0.03242    0.02832   0.003796        125        640: 1\n",
      "tensor([0.77918], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.814       0.53      0.626      0.356\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/99      3.65G    0.03251    0.02867   0.003817         88        640: 1\n",
      "tensor([0.70948], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.831      0.569      0.662      0.382\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/99      3.65G    0.03198    0.02777   0.003808        137        640: 1\n",
      "tensor([0.92322], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.797        0.6      0.694      0.413\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/99      3.65G    0.03154    0.02809   0.003562        166        640: 1\n",
      "tensor([0.94037], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.819      0.577      0.689      0.405\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/99      3.65G    0.03166    0.02783   0.003508        161        640: 1\n",
      "tensor([0.87835], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.844       0.62      0.718      0.414\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/99      3.65G    0.03164    0.02746   0.003572        118        640: 1\n",
      "tensor([0.77680], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.837      0.601      0.714      0.412\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/99      3.65G    0.03114    0.02747   0.003458        151        640: 1\n",
      "tensor([0.85118], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.848      0.597      0.705      0.402\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/99      3.65G    0.03118    0.02768   0.003428        133        640: 1\n",
      "tensor([0.80940], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.871      0.591      0.708      0.412\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/99      3.65G    0.03106    0.02728   0.003335        154        640: 1\n",
      "tensor([0.92414], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.85      0.615      0.712      0.417\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/99      3.65G    0.03083     0.0274   0.003414        122        640: 1\n",
      "tensor([0.75125], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.788      0.627      0.698       0.41\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/99      3.65G    0.03073    0.02722   0.003296        123        640: 1\n",
      "tensor([0.66622], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.866      0.631      0.723      0.432\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/99      3.65G    0.03047    0.02678   0.003219        127        640: 1\n",
      "tensor([0.68724], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.817      0.618       0.71      0.417\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/99      3.65G    0.03002    0.02588   0.003163        127        640: 1\n",
      "tensor([0.68269], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.815      0.597        0.7      0.396\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/99      3.65G     0.0303    0.02651   0.003182        122        640: 1\n",
      "tensor([0.77243], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.866      0.623      0.741      0.436\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/99      3.65G    0.03011    0.02669   0.003183        146        640: 1\n",
      "tensor([0.82794], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.832      0.621      0.712      0.414\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/99      3.65G    0.03007    0.02616   0.003092        202        640: 1\n",
      "tensor([0.87670], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.761      0.659      0.733      0.439\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/99      3.65G    0.02968    0.02595   0.003118         94        640: 1\n",
      "tensor([0.62945], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.79      0.642      0.723      0.432\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/99      3.65G    0.02968    0.02596    0.00302        152        640: 1\n",
      "tensor([0.83553], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.806      0.635      0.715      0.421\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/99      3.65G    0.02917    0.02582   0.003087        123        640: 1\n",
      "tensor([0.67382], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.862      0.659      0.734      0.433\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/99      3.65G    0.02945    0.02605   0.003076        162        640: 1\n",
      "tensor([0.72866], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.822      0.677      0.751      0.443\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/99      3.65G    0.02957    0.02628   0.003044        161        640: 1\n",
      "tensor([0.79832], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.861      0.633      0.722      0.429\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/99      3.65G     0.0293    0.02602   0.002992        122        640: 1\n",
      "tensor([0.64650], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.856      0.612      0.728      0.438\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/99      3.65G    0.02895    0.02554   0.002996        126        640: 1\n",
      "tensor([0.66549], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.85      0.611      0.713      0.428\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/99      3.65G    0.02892    0.02566   0.002955         90        640: 1\n",
      "tensor([0.64009], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.816      0.628      0.709      0.418\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/99      3.65G    0.02871     0.0254    0.00283        118        640: 1\n",
      "tensor([0.74788], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.881      0.609       0.71      0.423\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/99      3.65G    0.02865    0.02546   0.002743        157        640: 1\n",
      "tensor([0.78027], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.853       0.61      0.698      0.422\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/99      3.65G    0.02857    0.02538   0.002809        104        640: 1\n",
      "tensor([0.55340], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.795      0.638      0.708      0.432\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/99      3.65G    0.02857     0.0256   0.002701        157        640: 1\n",
      "tensor([0.72890], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.826      0.612      0.691      0.423\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/99      3.65G    0.02849      0.025   0.002675        108        640: 1\n",
      "tensor([0.55842], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.822      0.645      0.735      0.442\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/99      3.65G    0.02826    0.02497   0.002764        159        640: 1\n",
      "tensor([0.74012], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.857      0.632      0.723      0.428\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/99      3.65G    0.02786    0.02486   0.002695        118        640: 1\n",
      "tensor([0.68642], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.833      0.647      0.736      0.444\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/99      3.65G    0.02816    0.02525   0.002676        176        640: 1\n",
      "tensor([0.88054], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.83      0.635      0.721      0.439\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      50/99      3.65G    0.02816    0.02488   0.002716        130        640: 1\n",
      "tensor([0.70387], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.817      0.608      0.701      0.429\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      51/99      3.65G    0.02767    0.02492   0.002628        178        640: 1\n",
      "tensor([0.87635], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.841      0.599      0.701      0.432\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      52/99      3.65G    0.02762    0.02459    0.00262        148        640: 1\n",
      "tensor([0.70460], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.87      0.596      0.698      0.429\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      53/99      3.65G    0.02752    0.02446    0.00256        115        640: 1\n",
      "tensor([0.64466], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.893      0.616      0.715      0.422\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      54/99      3.65G    0.02747    0.02428   0.002555        124        640: 1\n",
      "tensor([0.64266], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.841      0.629      0.713       0.43\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      55/99      3.65G    0.02704    0.02393   0.002526        163        640: 1\n",
      "tensor([0.67814], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.877      0.593      0.699      0.431\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      56/99      3.65G    0.02712    0.02436   0.002569        200        640: 1\n",
      "tensor([0.79404], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.852      0.611      0.709       0.43\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      57/99      3.65G    0.02707    0.02429   0.002572        141        640: 1\n",
      "tensor([0.69233], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.91      0.608      0.728      0.439\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      58/99      3.65G    0.02706    0.02422   0.002597        146        640: 1\n",
      "tensor([0.68706], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.816      0.635      0.704      0.424\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      59/99      3.65G    0.02692    0.02408   0.002504        168        640: 1\n",
      "tensor([0.71621], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.87      0.639      0.725       0.44\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      60/99      3.65G    0.02684    0.02381   0.002439        175        640: 1\n",
      "tensor([0.74557], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.836      0.631      0.708      0.426\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      61/99      3.65G    0.02656    0.02392   0.002472        139        640: 1\n",
      "tensor([0.75424], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.888        0.6      0.713      0.434\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      62/99      3.65G    0.02643    0.02324    0.00245        117        640: 1\n",
      "tensor([0.64847], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.853      0.637      0.722      0.441\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      63/99      3.65G    0.02658    0.02355   0.002372        129        640: 1\n",
      "tensor([0.66344], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.815       0.64      0.708      0.437\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      64/99      3.65G    0.02635    0.02358   0.002457        109        640: 1\n",
      "tensor([0.60407], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.813      0.602      0.689      0.419\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      65/99      3.65G    0.02636    0.02369   0.002343        154        640: 1\n",
      "tensor([0.75100], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.848      0.588      0.693      0.421\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      66/99      3.65G    0.02609    0.02346   0.002383        119        640: 1\n",
      "tensor([0.64285], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.834      0.624      0.709      0.434\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      67/99      3.65G    0.02604    0.02296   0.002375        153        640: 1\n",
      "tensor([0.70375], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.847      0.576      0.668      0.404\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      68/99      3.65G    0.02602    0.02338   0.002384        116        640: 1\n",
      "tensor([0.60201], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.889      0.593      0.695      0.422\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      69/99      3.65G    0.02561    0.02274   0.002376        141        640: 1\n",
      "tensor([0.70694], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.826      0.614      0.696      0.428\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      70/99      3.65G    0.02575    0.02302   0.002281        175        640: 1\n",
      "tensor([0.79872], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.886      0.597      0.711      0.433\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      71/99      3.65G    0.02589    0.02302   0.002344        161        640: 1\n",
      "tensor([0.70766], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.818      0.581      0.661      0.409\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      72/99      3.65G    0.02558    0.02267   0.002213        114        640: 1\n",
      "tensor([0.60760], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.808      0.619      0.703      0.436\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      73/99      3.65G    0.02573    0.02313     0.0023        141        640: 1\n",
      "tensor([0.70491], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.856      0.596        0.7      0.432\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      74/99      3.65G    0.02556    0.02301   0.002262        133        640: 1\n",
      "tensor([0.59256], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.842      0.616      0.701      0.422\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      75/99      3.65G    0.02525    0.02249   0.002176        159        640: 1\n",
      "tensor([0.74001], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.867      0.582      0.692       0.43\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      76/99      3.65G    0.02546    0.02251   0.002266        122        640: 1\n",
      "tensor([0.55616], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.851      0.616      0.703      0.433\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      77/99      3.65G    0.02542     0.0228   0.002215        137        640: 1\n",
      "tensor([0.65278], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.839      0.596      0.689      0.429\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      78/99      3.65G    0.02531    0.02232   0.002206        137        640: 1\n",
      "tensor([0.64524], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.829      0.594      0.677      0.426\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      79/99      3.65G    0.02476    0.02191   0.002217        161        640: 1\n",
      "tensor([0.73391], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.875      0.578      0.686      0.432\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      80/99      3.65G    0.02496    0.02239   0.002142        154        640: 1\n",
      "tensor([0.62644], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.833      0.597      0.691      0.433\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      81/99      3.65G    0.02485    0.02203   0.002145        181        640: 1\n",
      "tensor([0.73247], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.871      0.604      0.699       0.44\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      82/99      3.65G    0.02475    0.02202   0.002141        149        640: 1\n",
      "tensor([0.61739], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.831      0.607       0.69      0.434\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      83/99      3.65G    0.02464    0.02205   0.002072        118        640: 1\n",
      "tensor([0.59445], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.847      0.601      0.704      0.448\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      84/99      3.65G    0.02451    0.02189   0.002101        178        640: 1\n",
      "tensor([0.73559], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.876      0.572      0.694      0.439\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      85/99      3.65G    0.02442    0.02181    0.00205        140        640: 1\n",
      "tensor([0.65241], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.863      0.606      0.707      0.452\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      86/99      3.65G     0.0245     0.0219   0.002119        119        640: 1\n",
      "tensor([0.52392], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.885      0.624      0.711      0.453\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      87/99      3.65G    0.02436    0.02198   0.002044        114        640: 1\n",
      "tensor([0.49844], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.863       0.61      0.702      0.445\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      88/99      3.65G    0.02447    0.02166   0.002117        117        640: 1\n",
      "tensor([0.54517], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.863      0.635      0.711      0.452\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      89/99      3.65G    0.02435    0.02178   0.002015        118        640: 1\n",
      "tensor([0.55598], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.884      0.613      0.714      0.455\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      90/99      3.65G    0.02419    0.02148   0.001978        115        640: 1\n",
      "tensor([0.57990], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.868      0.608      0.703      0.447\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      91/99      3.65G    0.02392    0.02119   0.001969        159        640: 1\n",
      "tensor([0.73937], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.839      0.621      0.704      0.446\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      92/99      3.65G    0.02402    0.02147   0.002026        165        640: 1\n",
      "tensor([0.68237], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.833      0.638       0.71      0.449\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      93/99      3.65G    0.02389    0.02113   0.002024        126        640: 1\n",
      "tensor([0.57596], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.852      0.632      0.715      0.454\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      94/99      3.65G    0.02379    0.02113    0.00203        112        640: 1\n",
      "tensor([0.56526], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.861      0.597      0.702      0.444\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      95/99      3.65G    0.02358    0.02097   0.001882        121        640: 1\n",
      "tensor([0.58768], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.808       0.62      0.701      0.441\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      96/99      3.65G    0.02386    0.02099   0.002002        195        640: 1\n",
      "tensor([0.63227], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.854      0.613      0.706      0.445\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      97/99      3.65G    0.02383    0.02104   0.001939        101        640: 1\n",
      "tensor([0.58344], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.819      0.629      0.706      0.447\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      98/99      3.65G    0.02352     0.0209   0.002009        137        640: 1\n",
      "tensor([0.57458], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.867      0.596      0.701       0.44\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      99/99      3.65G    0.02361    0.02104   0.001902        115        640: 1\n",
      "tensor([0.49200], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.795      0.633      0.704      0.445\n",
      "\n",
      "100 epochs completed in 1.416 hours.\n",
      "Optimizer stripped from runs/train/exp59/weights/last.pt, 14.3MB\n",
      "Optimizer stripped from runs/train/exp59/weights/best.pt, 14.3MB\n",
      "\n",
      "Validating runs/train/exp59/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.883      0.613      0.715      0.455\n",
      "                   Car       1048       4012      0.914      0.768      0.885      0.632\n",
      "                   Van       1048        431      0.866      0.661      0.776      0.556\n",
      "                 Truck       1048        166       0.91      0.674      0.808      0.565\n",
      "                  Tram       1048         56      0.904      0.674      0.755      0.482\n",
      "            Pedestrian       1048        618      0.822      0.545      0.662      0.332\n",
      "        Person_sitting       1048         20       0.94       0.55      0.615      0.396\n",
      "               Cyclist       1048        234      0.924      0.509      0.622      0.317\n",
      "                  Misc       1048        138      0.787      0.522      0.594      0.359\n",
      "Results saved to \u001b[1mruns/train/exp59\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m The process of logging environment details (conda environment, git patch) is underway. Please be patient as this may take some time.\n",
      "\u001b[1;38;5;214mCOMET WARNING:\u001b[0m Failed to complete logging of all environment details (conda environment, git patch)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/5375f11f8adc489bb79f91715fdbfaf6\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_f1                         : 0.8349225844715646\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_false_positives            : 290.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_mAP@.5                     : 0.8848672391351688\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_mAP@.5:.95                 : 0.6324881202325954\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_precision                  : 0.9139917365275746\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_recall                     : 0.768444666001994\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_support                    : 4012\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_true_positives             : 3083.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_f1                     : 0.6559602295507013\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_false_positives        : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_mAP@.5                 : 0.6220962294599625\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_mAP@.5:.95             : 0.31664374456922906\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_precision              : 0.9237203052450655\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_recall                 : 0.5085470085470085\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_support                : 234\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_true_positives         : 119.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_f1                        : 0.6276283780425423\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_false_positives           : 19.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_mAP@.5                    : 0.593915926154382\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_mAP@.5:.95                : 0.359199865166026\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_precision                 : 0.7874434927575024\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_recall                    : 0.5217391304347826\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_support                   : 138\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_true_positives            : 72.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_f1                  : 0.6555928727234576\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_false_positives     : 73.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_mAP@.5              : 0.6624337630765037\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_mAP@.5:.95          : 0.3323929859057687\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_precision           : 0.8217967408589132\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_recall              : 0.5453074433656958\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_support             : 618\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_true_positives      : 337.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_f1              : 0.6939570418510819\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_false_positives : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_mAP@.5          : 0.6152768601138998\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_mAP@.5:.95      : 0.3957156162848462\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_precision       : 0.9399901300051935\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_recall          : 0.55\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_support         : 20\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_true_positives  : 11.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_f1                        : 0.7720084523837905\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_false_positives           : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_mAP@.5                    : 0.7546302296221931\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_mAP@.5:.95                : 0.48219012225659846\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_precision                 : 0.9041135129662682\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_recall                    : 0.6735869166424722\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_support                   : 56\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_true_positives            : 38.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_f1                       : 0.7746054017583963\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_false_positives          : 11.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_mAP@.5                   : 0.8078467815532655\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_mAP@.5:.95               : 0.5649065548652807\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_precision                : 0.9104859691091864\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_recall                   : 0.6740155818469071\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_support                  : 166\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_true_positives           : 112.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_f1                         : 0.749954398437541\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_false_positives            : 44.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_mAP@.5                     : 0.7755413174826364\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_mAP@.5:.95                 : 0.5555989446240839\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_precision                  : 0.8661397374163331\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_recall                     : 0.6612529002320185\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_support                    : 431\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_true_positives             : 285.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [2610]                    : (0.5218462944030762, 2.199005603790283)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [200]          : (0.3342235416829521, 0.7512721246384699)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [200]     : (0.18026646014515207, 0.45513789850002184)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [200]        : (0.5898948823925825, 0.9096961406539352)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [200]           : (0.28770565436022344, 0.6773866418479717)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [200]           : (0.023523645475506783, 0.0383877195417881)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [200]           : (0.0018821274861693382, 0.007357478141784668)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [200]           : (0.020904000848531723, 0.03477301076054573)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [200]             : (0.033321019262075424, 0.04902467131614685)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [200]             : (0.005353031679987907, 0.01743783988058567)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [200]             : (0.05125068873167038, 0.08411847054958344)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [200]                    : (0.0002980000000000002, 0.07011450381679389)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [200]                    : (0.0002980000000000002, 0.009789529262086514)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [200]                    : (0.0002980000000000002, 0.009789529262086514)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/yolov5/5375f11f8adc489bb79f91715fdbfaf6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.05000000000000001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/exp59\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (1.87 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     model graph                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n",
      "\u001b[1;38;5;214mCOMET WARNING:\u001b[0m Failed to log run in comet.com\n"
     ]
    }
   ],
   "source": [
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_SI.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_kitti.yaml \\\n",
    "--data data/kitti.yaml \\\n",
    "--epochs 100 \\\n",
    "--weights ./runs/train/fog_02/weights/best.pt \\\n",
    "\"\"\"\n",
    "!{command}\n",
    "# --weights ./runs/train/exp3/weights/best.pt \\\n",
    "# 1.0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "583363ad-ac46-4d0d-b8dc-f4ba50709906",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_SI: \u001b[0mweights=./runs/train/fog_02/weights/best.pt, cfg=models/yolov5s_kitti.yaml, data=data/kitti.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=100, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=runs/train/fog_02/weights/fisher.pt, ewc_lambda=0.001, SI_enable=False, SI_pt=None, SI_lambda=10.0\n",
      "Command 'git fetch ultralytics' timed out after 5 seconds\n",
      "YOLOv5 🚀 cbe9b398 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/38002a3c04c24868b1f6238ff03e2fe9\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo.Detect                      [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_kitti summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "Transferred 348/349 items from runs/train/fog_02/weights/best.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/train.cache... 4189 image\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/val.cache... 1048 images, 0\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.81 anchors/target, 0.999 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/exp77/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/exp77\u001b[0m\n",
      "Starting training for 100 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/99      3.53G    0.04533     0.0582    0.01427        169        640:  error: RPC failed; curl 16 Error in the HTTP2 framing layer\n",
      "fatal: expected flush after ref listing\n",
      "       0/99      3.53G    0.03487    0.03432   0.006906        128        640: 1\n",
      "tensor([0.91390], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \u001b[1;38;5;214mCOMET WARNING:\u001b[0m Unknown error retrieving Conda package as an explicit file\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.734      0.382      0.454      0.261\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/99      3.53G     0.0336    0.03057   0.005307        133        640: 1\n",
      "tensor([1.00405], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.696      0.518      0.579      0.334\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/99      3.53G    0.03659    0.03296   0.006343        131        640: 1\n",
      "tensor([1.00276], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.627       0.37      0.401      0.219\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/99      3.53G    0.03866    0.03467   0.007245        108        640: 1\n",
      "tensor([0.85383], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.772      0.493      0.566      0.309\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/99      3.53G    0.03796    0.03304   0.006139        156        640: 1\n",
      "tensor([1.02975], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.747      0.581      0.663       0.38\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/99      3.53G    0.03718    0.03224   0.005724        123        640: 1\n",
      "tensor([0.86605], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.781      0.574      0.657       0.36\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/99      3.53G    0.03619     0.0314   0.005135        174        640: 1\n",
      "tensor([1.04530], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.772      0.594      0.655      0.353\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/99      3.53G    0.03582    0.03075   0.005078        166        640: 1\n",
      "tensor([1.06645], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.77      0.612       0.69      0.389\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/99      3.53G    0.03504    0.03075   0.004723        152        640: 1\n",
      "tensor([0.91898], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.733      0.587      0.652      0.351\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/99      3.53G    0.03526    0.03082   0.004897        136        640: 1\n",
      "tensor([0.94025], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.829      0.585      0.661      0.378\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/99      3.53G    0.03482    0.03033    0.00462        134        640: 1\n",
      "tensor([0.86448], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.849      0.648       0.74      0.426\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/99      3.53G    0.03414    0.02961   0.004417        182        640: 1\n",
      "tensor([0.94901], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.818      0.612      0.689      0.406\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/99      3.53G    0.03415    0.02987    0.00437        128        640: 1\n",
      "tensor([0.78172], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.848      0.627      0.714      0.403\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/99      3.53G    0.03348    0.02925   0.004204        112        640: 1\n",
      "tensor([0.91551], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.844      0.615      0.733      0.421\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/99      3.53G    0.03347    0.02943   0.004155        151        640: 1\n",
      "tensor([0.85007], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.822       0.65      0.731      0.423\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/99      3.53G    0.03307    0.02932    0.00402        132        640: 1\n",
      "tensor([0.87507], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.789      0.634      0.702      0.402\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/99      3.53G    0.03304    0.02885      0.004        131        640: 1\n",
      "tensor([0.81332], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.842      0.662      0.759      0.438\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/99      3.53G    0.03297    0.02873   0.003937        159        640: 1\n",
      "tensor([0.97713], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.808      0.648      0.725      0.442\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/99      3.53G    0.03266    0.02856    0.00389        125        640: 1\n",
      "tensor([0.74682], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.86      0.615      0.704      0.392\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/99      3.53G    0.03259    0.02874   0.003771         88        640: 1\n",
      "tensor([0.68118], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.876      0.588      0.709       0.42\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/99      3.53G    0.03208    0.02786   0.003759        137        640: 1\n",
      "tensor([0.92317], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.86      0.626      0.737      0.431\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/99      3.53G     0.0318    0.02825   0.003579        166        640: 1\n",
      "tensor([0.95526], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.836      0.613      0.712      0.421\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/99      3.53G    0.03155    0.02785   0.003505        161        640: 1\n",
      "tensor([0.87989], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.834      0.615      0.716      0.414\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/99      3.53G    0.03162    0.02756   0.003595        118        640: 1\n",
      "tensor([0.75994], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.81      0.671      0.741      0.434\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/99      3.53G    0.03129    0.02765   0.003515        151        640: 1\n",
      "tensor([0.90515], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.826      0.603      0.676      0.393\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/99      3.53G    0.03138    0.02788   0.003467        133        640: 1\n",
      "tensor([0.79415], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.863      0.619       0.71      0.405\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/99      3.53G    0.03078    0.02734   0.003378        154        640: 1\n",
      "tensor([0.94092], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.802      0.637      0.712      0.405\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/99      3.53G    0.03115    0.02752   0.003487        122        640: 1\n",
      "tensor([0.78320], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.842      0.583      0.704      0.417\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/99      3.53G    0.03074    0.02726   0.003303        123        640: 1\n",
      "tensor([0.64822], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.844      0.628      0.721      0.421\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/99      3.53G     0.0307    0.02698   0.003206        127        640: 1\n",
      "tensor([0.68637], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.853      0.621      0.724      0.424\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/99      3.53G    0.03025    0.02603   0.003202        127        640: 1\n",
      "tensor([0.71602], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.884      0.626      0.735      0.442\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/99      3.53G    0.03027     0.0266   0.003232        122        640: 1\n",
      "tensor([0.80240], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.871      0.626      0.716      0.427\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/99      3.53G    0.03025    0.02688   0.003138        146        640: 1\n",
      "tensor([0.84184], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.883      0.594      0.707      0.415\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/99      3.53G    0.03009    0.02639   0.003148        202        640: 1\n",
      "tensor([0.93439], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.835       0.61      0.708      0.426\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/99      3.53G    0.02988    0.02615   0.003149         94        640: 1\n",
      "tensor([0.63805], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.836      0.634      0.728      0.438\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/99      3.53G    0.02976    0.02622    0.00316        152        640: 1\n",
      "tensor([0.85195], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.847      0.604      0.703      0.425\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/99      3.53G    0.02966    0.02599   0.003088        123        640: 1\n",
      "tensor([0.69896], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.839       0.61      0.711      0.432\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/99      3.53G    0.02955    0.02616   0.003198        162        640: 1\n",
      "tensor([0.74686], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.822      0.621      0.711      0.428\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/99      3.53G    0.02958    0.02633   0.003111        161        640: 1\n",
      "tensor([0.80521], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.856      0.653      0.744      0.449\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/99      3.53G    0.02939    0.02606   0.002944        122        640: 1\n",
      "tensor([0.68725], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.814      0.645      0.737      0.445\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/99      3.53G    0.02917     0.0257   0.003015        126        640: 1\n",
      "tensor([0.66920], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.801      0.626      0.705      0.422\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/99      3.53G    0.02874    0.02569   0.003004         90        640: 1\n",
      "tensor([0.63265], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.883      0.619      0.728      0.442\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/99      3.53G     0.0288    0.02557   0.002861        118        640: 1\n",
      "tensor([0.76255], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.857      0.633       0.73      0.441\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/99      3.53G    0.02865    0.02554   0.002725        157        640: 1\n",
      "tensor([0.81553], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.827      0.625      0.714      0.419\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/99      3.53G    0.02877    0.02549   0.002827        104        640: 1\n",
      "tensor([0.56309], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.846      0.618      0.713      0.432\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/99      3.53G    0.02855    0.02572   0.002725        157        640: 1\n",
      "tensor([0.73203], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.883      0.605      0.699      0.428\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/99      3.53G     0.0285    0.02507   0.002693        108        640: 1\n",
      "tensor([0.57726], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.839      0.664      0.738      0.441\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/99      3.53G    0.02841    0.02511   0.002822        159        640: 1\n",
      "tensor([0.76344], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.882      0.664      0.747      0.444\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/99      3.53G    0.02798    0.02503   0.002683        118        640: 1\n",
      "tensor([0.68360], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.84      0.606      0.704       0.42\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/99      3.53G    0.02825    0.02533   0.002727        176        640: 1\n",
      "tensor([0.90491], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.856      0.619       0.71      0.442\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      50/99      3.53G    0.02819    0.02498   0.002737        130        640: 1\n",
      "tensor([0.66741], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.83      0.628      0.729      0.439\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      51/99      3.53G    0.02766    0.02497   0.002602        178        640: 1\n",
      "tensor([0.86640], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.852      0.621      0.724      0.442\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      52/99      3.53G    0.02774    0.02463   0.002639        148        640: 1\n",
      "tensor([0.70270], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.883      0.621      0.729       0.45\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      53/99      3.53G    0.02762    0.02455   0.002607        115        640: 1\n",
      "tensor([0.65341], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.869      0.635       0.73       0.45\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      54/99      3.53G    0.02749    0.02439   0.002607        124        640: 1\n",
      "tensor([0.64376], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.781      0.633      0.708      0.428\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      55/99      3.53G    0.02716    0.02403   0.002577        163        640: 1\n",
      "tensor([0.72760], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.856      0.586      0.684      0.415\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      56/99      3.53G    0.02729    0.02444   0.002549        200        640: 1\n",
      "tensor([0.78234], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.856      0.624      0.721      0.444\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      57/99      3.53G    0.02714    0.02437   0.002538        141        640: 1\n",
      "tensor([0.68958], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.863      0.642      0.733       0.45\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      58/99      3.53G    0.02715    0.02428   0.002636        146        640: 1\n",
      "tensor([0.68601], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.836      0.641      0.723      0.435\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      59/99      3.53G    0.02705    0.02425   0.002571        168        640: 1\n",
      "tensor([0.71115], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.898      0.622      0.735      0.447\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      60/99      3.53G    0.02697    0.02397   0.002481        175        640: 1\n",
      "tensor([0.75321], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.846      0.651      0.728      0.443\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      61/99      3.53G    0.02658    0.02402   0.002535        139        640: 1\n",
      "tensor([0.76655], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.828      0.617       0.71      0.437\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      62/99      3.53G    0.02651    0.02326   0.002403        117        640: 1\n",
      "tensor([0.66190], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.853      0.619      0.721      0.444\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      63/99      3.53G    0.02662     0.0236    0.00236        129        640: 1\n",
      "tensor([0.67699], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.83      0.674      0.741      0.455\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      64/99      3.53G    0.02649     0.0237     0.0025        109        640: 1\n",
      "tensor([0.61919], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.812      0.642      0.721      0.434\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      65/99      3.53G    0.02641    0.02378   0.002358        154        640: 1\n",
      "tensor([0.76635], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.843      0.647      0.725      0.448\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      66/99      3.53G    0.02613     0.0235   0.002407        119        640: 1\n",
      "tensor([0.66299], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.851      0.615      0.706      0.433\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      67/99      3.53G    0.02609    0.02298   0.002391        153        640: 1\n",
      "tensor([0.72124], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.824      0.601      0.684      0.419\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      68/99      3.53G    0.02612    0.02351   0.002401        116        640: 1\n",
      "tensor([0.61535], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.86      0.638      0.718      0.437\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      69/99      3.53G    0.02573    0.02285   0.002383        141        640: 1\n",
      "tensor([0.70612], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.87      0.618      0.721      0.446\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      70/99      3.53G    0.02583    0.02307   0.002348        175        640: 1\n",
      "tensor([0.81851], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.821      0.627      0.714      0.442\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      71/99      3.53G     0.0259    0.02312   0.002323        161        640: 1\n",
      "tensor([0.72190], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.831      0.603      0.693      0.431\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      72/99      3.53G    0.02569    0.02275   0.002194        114        640: 1\n",
      "tensor([0.63330], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.839      0.641      0.722       0.45\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      73/99      3.53G    0.02586    0.02326   0.002398        141        640: 1\n",
      "tensor([0.70966], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.842      0.608        0.7      0.446\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      74/99      3.53G    0.02566    0.02309   0.002247        133        640: 1\n",
      "tensor([0.58948], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.855      0.597      0.696      0.431\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      75/99      3.53G    0.02535    0.02255   0.002188        159        640: 1\n",
      "tensor([0.74401], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.853      0.595      0.697       0.43\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      76/99      3.53G    0.02551    0.02262   0.002248        122        640: 1\n",
      "tensor([0.57090], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.85      0.586      0.692      0.427\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      77/99      3.53G     0.0255     0.0229   0.002222        137        640: 1\n",
      "tensor([0.67084], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.879      0.584      0.686      0.431\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      78/99      3.53G    0.02538    0.02241   0.002234        137        640: 1\n",
      "tensor([0.64606], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.823      0.609      0.702      0.445\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      79/99      3.53G    0.02482    0.02196   0.002232        161        640: 1\n",
      "tensor([0.73128], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.868      0.587      0.687      0.441\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      80/99      3.53G    0.02506    0.02249   0.002141        154        640: 1\n",
      "tensor([0.62762], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.86        0.6      0.704      0.449\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      81/99      3.53G    0.02486    0.02213    0.00217        181        640: 1\n",
      "tensor([0.74717], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.851      0.621      0.709      0.451\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      82/99      3.53G    0.02481    0.02204   0.002125        149        640: 1\n",
      "tensor([0.62239], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.885      0.588        0.7      0.444\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      83/99      3.53G    0.02471     0.0221   0.002075        118        640: 1\n",
      "tensor([0.59596], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.877      0.621      0.724      0.453\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      84/99      3.53G    0.02454    0.02192   0.002107        178        640: 1\n",
      "tensor([0.73595], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.873      0.601      0.716      0.449\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      85/99      3.53G    0.02449    0.02189   0.002064        140        640: 1\n",
      "tensor([0.65868], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.859      0.606      0.715      0.451\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      86/99      3.53G    0.02454    0.02199   0.002134        119        640: 1\n",
      "tensor([0.53090], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.871       0.61      0.711      0.449\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      87/99      3.53G    0.02446    0.02207   0.002029        114        640: 1\n",
      "tensor([0.51192], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.863      0.611      0.711      0.451\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      88/99      3.53G    0.02454    0.02172   0.002095        117        640: 1\n",
      "tensor([0.55524], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.867      0.617      0.712      0.451\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      89/99      3.53G    0.02439    0.02182   0.002025        118        640: 1\n",
      "tensor([0.56149], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.893      0.621      0.719      0.453\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      90/99      3.53G    0.02425    0.02148    0.00196        115        640: 1\n",
      "tensor([0.57836], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.815      0.636      0.715       0.45\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      91/99      3.53G    0.02401    0.02122   0.001986        159        640: 1\n",
      "tensor([0.74743], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.848      0.619      0.713      0.453\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      92/99      3.53G    0.02405    0.02151   0.002028        165        640: 1\n",
      "tensor([0.68929], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.895      0.601       0.72      0.451\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      93/99      3.53G    0.02394    0.02116   0.002016        126        640: 1\n",
      "tensor([0.56770], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.88      0.626      0.717      0.458\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      94/99      3.53G    0.02384    0.02118   0.002023        112        640: 1\n",
      "tensor([0.56871], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.877      0.591      0.698      0.444\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      95/99      3.53G    0.02363    0.02097   0.001894        121        640: 1\n",
      "tensor([0.60668], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.888      0.591        0.7      0.442\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      96/99      3.53G     0.0239      0.021   0.002023        195        640: 1\n",
      "tensor([0.63816], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.883      0.597      0.705      0.448\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      97/99      3.53G    0.02386    0.02112   0.001924        101        640: 1\n",
      "tensor([0.57676], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.877      0.601       0.71      0.451\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      98/99      3.53G    0.02358    0.02094   0.001997        137        640: 1\n",
      "tensor([0.57359], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.887      0.604      0.713      0.454\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      99/99      3.53G    0.02366    0.02109   0.001916        115        640: 1\n",
      "tensor([0.49351], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.878      0.612      0.718      0.457\n",
      "\n",
      "100 epochs completed in 1.401 hours.\n",
      "Optimizer stripped from runs/train/exp77/weights/last.pt, 14.3MB\n",
      "Optimizer stripped from runs/train/exp77/weights/best.pt, 14.3MB\n",
      "\n",
      "Validating runs/train/exp77/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.878      0.626      0.717      0.458\n",
      "                   Car       1048       4012      0.905      0.789      0.892      0.643\n",
      "                   Van       1048        431      0.872      0.722      0.818      0.576\n",
      "                 Truck       1048        166      0.886      0.687      0.806      0.584\n",
      "                  Tram       1048         56      0.874      0.621      0.727      0.446\n",
      "            Pedestrian       1048        618      0.785      0.576      0.674      0.331\n",
      "        Person_sitting       1048         20      0.997        0.6      0.597       0.38\n",
      "               Cyclist       1048        234      0.843      0.491      0.595      0.327\n",
      "                  Misc       1048        138      0.858      0.522      0.628      0.377\n",
      "Results saved to \u001b[1mruns/train/exp77\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/38002a3c04c24868b1f6238ff03e2fe9\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_f1                         : 0.8431417577117352\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_false_positives            : 331.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_mAP@.5                     : 0.8919225702686833\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_mAP@.5:.95                 : 0.6430523564814498\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_precision                  : 0.9053275819423323\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_recall                     : 0.7889497797897598\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_support                    : 4012\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_true_positives             : 3165.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_f1                     : 0.6210276737864443\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_false_positives        : 21.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_mAP@.5                 : 0.5954084001447576\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_mAP@.5:.95             : 0.32662646604531353\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_precision              : 0.843393760736907\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_recall                 : 0.49145299145299143\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_support                : 234\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_true_positives         : 115.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_f1                        : 0.6490367672325261\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_false_positives           : 12.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_mAP@.5                    : 0.6280817295446128\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_mAP@.5:.95                : 0.37656660961664773\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_precision                 : 0.8584996356080693\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_recall                    : 0.5217391304347826\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_support                   : 138\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_true_positives            : 72.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_f1                  : 0.6646033490932042\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_false_positives     : 97.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_mAP@.5              : 0.6740493657615216\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_mAP@.5:.95          : 0.3311875744561488\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_precision           : 0.7853246700870472\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_recall              : 0.5760517799352751\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_support             : 618\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_true_positives      : 356.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_f1              : 0.7490901044107784\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_false_positives : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_mAP@.5          : 0.5973722275795564\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_mAP@.5:.95      : 0.3801747484301416\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_precision       : 0.9967713439935663\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_recall          : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_support         : 20\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_true_positives  : 12.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_f1                        : 0.7260887983071052\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_false_positives           : 5.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_mAP@.5                    : 0.7267767248254389\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_mAP@.5:.95                : 0.4462693124728103\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_precision                 : 0.874257783262821\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_recall                    : 0.6208647924642887\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_support                   : 56\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_true_positives            : 35.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_f1                       : 0.7738721259638751\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_false_positives          : 15.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_mAP@.5                   : 0.8064726587553757\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_mAP@.5:.95               : 0.5835248066851151\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_precision                : 0.8863158532647916\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_recall                   : 0.6867469879518072\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_support                  : 166\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_true_positives           : 114.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_f1                         : 0.7896461082698061\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_false_positives            : 46.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_mAP@.5                     : 0.8175251872605819\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_mAP@.5:.95                 : 0.5764691539338909\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_precision                  : 0.8718942558489613\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_recall                     : 0.7215777262180975\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_support                    : 431\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_true_positives             : 311.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [2610]                    : (0.5080533027648926, 2.199005603790283)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [200]          : (0.40070412039823816, 0.7591790193540484)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [200]     : (0.21854449017045402, 0.45810868911384195)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [200]        : (0.6267113296540721, 0.8984632757272257)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [200]           : (0.3697762597076356, 0.6743261069504162)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [200]           : (0.023584965616464615, 0.03866460546851158)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [200]           : (0.0018938053399324417, 0.007244708947837353)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [200]           : (0.02093564346432686, 0.0346733033657074)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [200]             : (0.033203188329935074, 0.045591264963150024)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [200]             : (0.005180012434720993, 0.014938747510313988)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [200]             : (0.05137622728943825, 0.07452380657196045)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [200]                    : (0.0002980000000000002, 0.07011450381679389)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [200]                    : (0.0002980000000000002, 0.009789529262086514)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [200]                    : (0.0002980000000000002, 0.009789529262086514)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/yolov5/38002a3c04c24868b1f6238ff03e2fe9\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.05000000000000001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/exp77\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (1.88 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     model graph                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n"
     ]
    }
   ],
   "source": [
    "# Fine-tune on KITTI with EWC regularization: starting weights and the Fisher\n",
    "# information matrix both come from the fog-adapted checkpoint (fog_02 run).\n",
    "command = \"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_SI.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_kitti.yaml \\\n",
    "--data data/kitti.yaml \\\n",
    "--epochs 100 \\\n",
    "--weights ./runs/train/fog_02/weights/best.pt \\\n",
    "--ewc_pt runs/train/fog_02/weights/fisher.pt \\\n",
    "--ewc_lambda 1e-3 \\\n",
    "\"\"\"\n",
    "!{command}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "c87185cd-be07-4326-896a-63e52687cc71",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti.yaml, weights=['runs/train/exp77/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 cbe9b398 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.858       0.63      0.743      0.462\n",
      "                   Car       2244       8711      0.912       0.78       0.89      0.641\n",
      "                   Van       2244        861      0.827      0.672      0.772      0.536\n",
      "                 Truck       2244        333       0.92      0.754      0.853      0.619\n",
      "                  Tram       2244        138      0.954      0.605      0.783      0.453\n",
      "            Pedestrian       2244       1286      0.786      0.616      0.691      0.363\n",
      "        Person_sitting       2244         89       0.71      0.596      0.676      0.329\n",
      "               Cyclist       2244        496      0.899      0.464      0.602      0.336\n",
      "                  Misc       2244        284      0.854      0.556      0.679      0.419\n",
      "Speed: 0.0ms pre-process, 1.0ms inference, 0.9ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp75\u001b[0m\n",
      "Test set val successfully!\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the trained checkpoint on the KITTI test split via val.py;\n",
    "# the echo only fires if validation exits successfully (&& chaining).\n",
    "model = 'runs/train/exp77/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "# NOTE: this one is without EWC.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5f8dcb51-226e-4682-905c-15e535cb46b9",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ad7d610d-e0bb-40de-9448-83d242181530",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "11cd2fdc-8210-4718-8a0c-d433675040cb",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "09ea47fb-8529-4322-bb71-2ae5568dabbf",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "d40a4a1f-4e49-4c3b-9a8b-8eb161a306cb",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_SI: \u001b[0mweights=./runs/train/fog_02/weights/best.pt, cfg=models/yolov5s_kitti.yaml, data=data/kitti.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=100, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=runs/train/fog_02/weights/fisher.pt, ewc_lambda=0.001, SI_enable=False, SI_pt=None, SI_lambda=10.0\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2882 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 cbe9b398 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/6947081a572a4e938d1946fdca546c21\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo.Detect                      [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_kitti summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "Transferred 348/349 items from runs/train/fog_02/weights/best.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/train.cache... 4189 image\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/val.cache... 1048 images, 0\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.81 anchors/target, 0.999 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/exp82/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/exp82\u001b[0m\n",
      "Starting training for 100 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/99      3.53G    0.03485    0.03429   0.006985        128        640: 1\n",
      "tensor([0.89333], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.692      0.414      0.454      0.258\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/99      3.53G    0.03377    0.03049   0.005148        133        640: 1\n",
      "tensor([0.96951], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.757      0.547      0.637      0.376\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/99      3.53G    0.03665    0.03348   0.006631        131        640: 1\n",
      "tensor([1.08227], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.498      0.341      0.329      0.178\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/99      3.53G     0.0389    0.03516   0.007611        108        640: 1\n",
      "tensor([0.86673], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.662      0.399      0.448      0.244\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/99      3.53G    0.03809    0.03314   0.006215        156        640: 1\n",
      "tensor([0.94194], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.711      0.524      0.602      0.318\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/99      3.53G    0.03703    0.03237   0.005836        123        640: 1\n",
      "tensor([0.90550], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.74      0.558       0.62      0.345\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/99      3.53G    0.03641     0.0315   0.005256        174        640: 1\n",
      "tensor([1.03341], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.844      0.554      0.647      0.353\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/99      3.53G    0.03613    0.03091    0.00513        166        640: 1\n",
      "tensor([1.09926], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.772      0.538      0.626       0.34\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/99      3.53G    0.03544    0.03102   0.004921        152        640: 1\n",
      "tensor([0.94738], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.764      0.538      0.597      0.332\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/99      3.53G    0.03494    0.03074   0.004815        136        640: 1\n",
      "tensor([0.89109], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.794        0.6      0.676      0.389\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/99      3.53G    0.03487    0.03048   0.004699        134        640: 1\n",
      "tensor([0.87155], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.837      0.618      0.701      0.407\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/99      3.53G    0.03423    0.02976   0.004476        182        640: 1\n",
      "tensor([0.96081], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.748      0.574      0.645      0.372\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/99      3.53G    0.03414    0.02993   0.004312        128        640: 1\n",
      "tensor([0.76022], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.826      0.623      0.706      0.403\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/99      3.53G     0.0336    0.02921   0.004153        112        640: 1\n",
      "tensor([0.92403], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.796      0.625      0.717      0.405\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/99      3.53G    0.03354    0.02956    0.00429        151        640: 1\n",
      "tensor([0.86311], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.754      0.584      0.658      0.367\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/99      3.53G    0.03335    0.02928   0.003987        132        640: 1\n",
      "tensor([0.83602], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.841      0.601      0.718      0.426\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/99      3.53G    0.03266    0.02871   0.003826        131        640: 1\n",
      "tensor([0.80726], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.855      0.621      0.725      0.422\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/99      3.53G     0.0331    0.02877   0.003934        159        640: 1\n",
      "tensor([0.90633], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.81      0.608      0.695      0.403\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/99      3.53G    0.03251    0.02838   0.003847        125        640: 1\n",
      "tensor([0.74946], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.824      0.546      0.647      0.356\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/99      3.53G    0.03253    0.02876   0.003796         88        640: 1\n",
      "tensor([0.69298], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.841      0.628       0.72      0.415\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/99      3.53G    0.03215    0.02797   0.003764        137        640: 1\n",
      "tensor([0.97018], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.778      0.604      0.699      0.402\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/99      3.53G    0.03176    0.02826   0.003592        166        640: 1\n",
      "tensor([0.91133], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.851      0.628      0.731      0.433\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/99      3.53G    0.03145    0.02782   0.003538        161        640: 1\n",
      "tensor([0.87526], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.824       0.63      0.712      0.417\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/99      3.53G    0.03161    0.02748   0.003513        118        640: 1\n",
      "tensor([0.77070], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.851      0.626      0.721      0.417\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/99      3.53G    0.03137    0.02751   0.003505        151        640: 1\n",
      "tensor([0.86176], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.826      0.623      0.714       0.41\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/99      3.53G    0.03123    0.02767   0.003431        133        640: 1\n",
      "tensor([0.80384], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.827      0.629      0.714      0.417\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/99      3.53G    0.03097    0.02761   0.003582        122        640: 1\n",
      "tensor([0.76383], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.819       0.64       0.72      0.422\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/99      3.53G    0.03086    0.02745    0.00332        123        640: 1\n",
      "tensor([0.65575], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.84      0.676      0.763      0.443\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/99      3.53G    0.03048    0.02689    0.00325        127        640: 1\n",
      "tensor([0.68310], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.841      0.627      0.719      0.423\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/99      3.53G    0.03015    0.02598   0.003187        127        640: 1\n",
      "tensor([0.71365], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.813      0.637      0.721      0.429\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/99      3.53G    0.03021    0.02654   0.003171        122        640: 1\n",
      "tensor([0.78797], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.874       0.61      0.719      0.428\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/99      3.53G    0.03033    0.02683   0.003127        146        640: 1\n",
      "tensor([0.83105], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.834      0.656      0.732      0.428\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/99      3.53G    0.02968     0.0262   0.003147        202        640: 1\n",
      "tensor([0.91004], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.827      0.629       0.72      0.419\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/99      3.53G    0.02997    0.02617   0.003177         94        640: 1\n",
      "tensor([0.66078], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.827      0.631       0.71      0.428\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/99      3.53G    0.02987    0.02611   0.003068        152        640: 1\n",
      "tensor([0.81083], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.822       0.65      0.739      0.443\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/99      3.53G    0.02963    0.02594   0.003042        123        640: 1\n",
      "tensor([0.70696], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.875      0.601      0.711      0.422\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/99      3.53G    0.02948    0.02608   0.003127        162        640: 1\n",
      "tensor([0.76238], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.848      0.618      0.705      0.427\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/99      3.53G     0.0296    0.02632   0.003141        161        640: 1\n",
      "tensor([0.79268], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.871       0.63      0.725      0.435\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/99      3.53G    0.02957    0.02625   0.003069        122        640: 1\n",
      "tensor([0.68135], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.833      0.638      0.716      0.422\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/99      3.53G    0.02936     0.0258   0.003025        126        640: 1\n",
      "tensor([0.67246], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.843      0.618      0.723      0.427\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/99      3.53G    0.02889    0.02572   0.003029         90        640: 1\n",
      "tensor([0.62357], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.848      0.631      0.732      0.437\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/99      3.53G    0.02877    0.02552    0.00292        118        640: 1\n",
      "tensor([0.73778], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.853      0.637      0.728      0.448\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/99      3.53G    0.02888     0.0256   0.002734        157        640: 1\n",
      "tensor([0.78480], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.835      0.624      0.706      0.427\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/99      3.53G    0.02874    0.02552   0.002885        104        640: 1\n",
      "tensor([0.54044], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.863      0.607      0.706      0.417\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/99      3.53G    0.02858    0.02575    0.00272        157        640: 1\n",
      "tensor([0.72981], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.842      0.621      0.712      0.426\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/99      3.53G    0.02852     0.0251   0.002708        108        640: 1\n",
      "tensor([0.56639], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.87      0.606      0.695      0.421\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/99      3.53G    0.02847     0.0251   0.002733        159        640: 1\n",
      "tensor([0.76302], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.826      0.611      0.703      0.419\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/99      3.53G    0.02793    0.02493   0.002682        118        640: 1\n",
      "tensor([0.69044], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.815      0.622      0.709      0.419\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/99      3.53G     0.0282    0.02537   0.002705        176        640: 1\n",
      "tensor([0.89418], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.843      0.599      0.701      0.436\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      50/99      3.53G    0.02816    0.02498   0.002746        130        640: 1\n",
      "tensor([0.67636], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.87      0.594      0.701      0.426\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      51/99      3.53G    0.02777    0.02502   0.002579        178        640: 1\n",
      "tensor([0.89694], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.879      0.566      0.697       0.42\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      52/99      3.53G     0.0277    0.02465   0.002646        148        640: 1\n",
      "tensor([0.68493], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.843      0.598      0.701       0.43\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      53/99      3.53G    0.02756    0.02452   0.002586        115        640: 1\n",
      "tensor([0.66378], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.819      0.616      0.695      0.426\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      54/99      3.53G    0.02774    0.02452    0.00267        124        640: 1\n",
      "tensor([0.66597], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.87      0.588      0.689      0.417\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      55/99      3.53G     0.0272    0.02408   0.002581        163        640: 1\n",
      "tensor([0.69831], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.834      0.616        0.7      0.415\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      56/99      3.53G    0.02731    0.02444   0.002542        200        640: 1\n",
      "tensor([0.79083], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.817      0.613      0.697       0.42\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      57/99      3.53G     0.0272    0.02443   0.002563        141        640: 1\n",
      "tensor([0.68309], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.882      0.632      0.728      0.439\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      58/99      3.53G    0.02722    0.02426   0.002616        146        640: 1\n",
      "tensor([0.68539], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.873      0.611      0.711      0.435\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      59/99      3.53G      0.027    0.02419   0.002546        168        640: 1\n",
      "tensor([0.71844], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.844      0.646      0.737      0.447\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      60/99      3.53G    0.02691     0.0239   0.002452        175        640: 1\n",
      "tensor([0.78945], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.811       0.62       0.71      0.429\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      61/99      3.53G    0.02668    0.02403   0.002525        139        640: 1\n",
      "tensor([0.75108], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.872      0.578      0.702      0.419\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      62/99      3.53G    0.02653    0.02329   0.002403        117        640: 1\n",
      "tensor([0.63533], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.808      0.602      0.687      0.427\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      63/99      3.53G    0.02661    0.02356   0.002413        129        640: 1\n",
      "tensor([0.67535], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.846      0.607      0.707      0.435\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      64/99      3.53G    0.02649    0.02367    0.00247        109        640: 1\n",
      "tensor([0.64738], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.858      0.609      0.704      0.433\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      65/99      3.53G    0.02647    0.02376   0.002329        154        640: 1\n",
      "tensor([0.76791], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.855      0.611      0.698      0.428\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      66/99      3.53G    0.02613    0.02355   0.002379        119        640: 1\n",
      "tensor([0.64423], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.829      0.627       0.71       0.44\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      67/99      3.53G    0.02611    0.02297   0.002381        153        640: 1\n",
      "tensor([0.70567], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.835      0.602      0.688      0.425\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      68/99      3.53G    0.02612    0.02351   0.002392        116        640: 1\n",
      "tensor([0.61481], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.848      0.598      0.693      0.429\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      69/99      3.53G    0.02569    0.02283   0.002378        141        640: 1\n",
      "tensor([0.70060], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.826      0.604       0.69      0.434\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      70/99      3.53G     0.0258    0.02309   0.002327        175        640: 1\n",
      "tensor([0.83723], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.795       0.64      0.716      0.439\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      71/99      3.53G    0.02591    0.02307   0.002324        161        640: 1\n",
      "tensor([0.72444], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.848      0.586      0.699      0.428\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      72/99      3.53G     0.0257    0.02277   0.002229        114        640: 1\n",
      "tensor([0.61767], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.869      0.611       0.71      0.437\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      73/99      3.53G    0.02584    0.02325    0.00236        141        640: 1\n",
      "tensor([0.71377], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.826      0.603      0.692      0.422\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      74/99      3.53G    0.02567    0.02318   0.002263        133        640: 1\n",
      "tensor([0.59471], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.832      0.608      0.694      0.423\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      75/99      3.53G    0.02537    0.02257   0.002203        159        640: 1\n",
      "tensor([0.75962], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.859        0.6      0.702      0.438\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      76/99      3.53G    0.02553    0.02261   0.002286        122        640: 1\n",
      "tensor([0.58386], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.906      0.588      0.714      0.448\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      77/99      3.53G    0.02549    0.02286   0.002218        137        640: 1\n",
      "tensor([0.66611], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.869      0.623      0.717      0.447\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      78/99      3.53G    0.02539    0.02243   0.002242        137        640: 1\n",
      "tensor([0.66381], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.894      0.589      0.702      0.432\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      79/99      3.53G    0.02485    0.02201    0.00223        161        640: 1\n",
      "tensor([0.73512], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.866      0.608      0.713      0.442\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      80/99      3.53G    0.02506    0.02252   0.002131        154        640: 1\n",
      "tensor([0.62355], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.845      0.605      0.694      0.432\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      81/99      3.53G    0.02491    0.02215   0.002156        181        640: 1\n",
      "tensor([0.73874], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.865      0.603      0.709      0.444\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      82/99      3.53G    0.02479    0.02203   0.002147        149        640: 1\n",
      "tensor([0.63964], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.874      0.613      0.707      0.442\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      83/99      3.53G    0.02471    0.02211   0.002059        118        640: 1\n",
      "tensor([0.60494], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.897      0.616      0.728      0.456\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      84/99      3.53G    0.02456    0.02192   0.002086        178        640: 1\n",
      "tensor([0.74081], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.831      0.627       0.72      0.443\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      85/99      3.53G    0.02448    0.02188    0.00205        140        640: 1\n",
      "tensor([0.65730], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.881      0.613      0.723      0.453\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      86/99      3.53G    0.02455    0.02196   0.002109        119        640: 1\n",
      "tensor([0.53180], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.874      0.608      0.706      0.445\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      87/99      3.53G    0.02448     0.0221    0.00205        114        640: 1\n",
      "tensor([0.51729], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.859       0.59      0.703      0.437\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      88/99      3.53G    0.02457    0.02174   0.002105        117        640: 1\n",
      "tensor([0.54020], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675        0.9      0.591      0.706      0.438\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      89/99      3.53G    0.02439    0.02184    0.00202        118        640: 1\n",
      "tensor([0.54412], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.879      0.583      0.708      0.441\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      90/99      3.53G    0.02425    0.02145   0.001976        115        640: 1\n",
      "tensor([0.57904], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.883      0.609      0.714      0.448\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      91/99      3.53G      0.024    0.02124   0.001982        159        640: 1\n",
      "tensor([0.74714], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.869      0.609      0.712      0.443\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      92/99      3.53G    0.02403     0.0215   0.002031        165        640: 1\n",
      "tensor([0.69143], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.861       0.59      0.695      0.435\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      93/99      3.53G    0.02391    0.02119   0.002021        126        640: 1\n",
      "tensor([0.57762], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.875      0.597        0.7      0.439\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      94/99      3.53G    0.02383     0.0212   0.002028        112        640: 1\n",
      "tensor([0.58417], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.883      0.592      0.702       0.44\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      95/99      3.53G    0.02363    0.02101   0.001892        121        640: 1\n",
      "tensor([0.58070], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.869      0.584      0.692       0.43\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      96/99      3.53G     0.0239    0.02099   0.002023        195        640: 1\n",
      "tensor([0.63479], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.862      0.592      0.701      0.439\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      97/99      3.53G    0.02386    0.02111   0.001952        101        640: 1\n",
      "tensor([0.60514], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.902      0.593      0.712      0.442\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      98/99      3.53G    0.02359    0.02097   0.001971        137        640: 1\n",
      "tensor([0.57542], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.882      0.599       0.71      0.444\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      99/99      3.53G    0.02366    0.02109   0.001911        115        640: 1\n",
      "tensor([0.48577], device='cuda:0', grad_fn=<AddBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.881      0.596      0.709      0.447\n",
      "\n",
      "100 epochs completed in 1.906 hours.\n",
      "Optimizer stripped from runs/train/exp82/weights/last.pt, 14.3MB\n",
      "Optimizer stripped from runs/train/exp82/weights/best.pt, 14.3MB\n",
      "\n",
      "Validating runs/train/exp82/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.897      0.616      0.728      0.457\n",
      "                   Car       1048       4012      0.945      0.778      0.897      0.647\n",
      "                   Van       1048        431      0.886      0.684      0.803      0.576\n",
      "                 Truck       1048        166      0.907      0.707      0.795      0.559\n",
      "                  Tram       1048         56      0.922      0.636      0.816      0.472\n",
      "            Pedestrian       1048        618      0.829      0.571      0.673      0.333\n",
      "        Person_sitting       1048         20       0.93       0.55        0.6      0.358\n",
      "               Cyclist       1048        234      0.924       0.47       0.61      0.333\n",
      "                  Misc       1048        138      0.834      0.529      0.631      0.375\n",
      "Results saved to \u001b[1mruns/train/exp82\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/6947081a572a4e938d1946fdca546c21\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_f1                         : 0.8538409680463463\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_false_positives            : 180.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_mAP@.5                     : 0.8970545381956428\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_mAP@.5:.95                 : 0.6470082294965536\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_precision                  : 0.9454526888072883\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_recall                     : 0.7784147557328016\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_support                    : 4012\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_true_positives             : 3123.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_f1                     : 0.6232204533294495\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_false_positives        : 9.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_mAP@.5                 : 0.6096647365331935\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_mAP@.5:.95             : 0.33316745934950875\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_precision              : 0.9243301144281537\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_recall                 : 0.4700854700854701\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_support                : 234\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_true_positives         : 110.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_f1                        : 0.6474474519024453\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_false_positives           : 15.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_mAP@.5                    : 0.6305343055802691\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_mAP@.5:.95                : 0.37495601505013626\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_precision                 : 0.8342768843740784\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_recall                    : 0.5289855072463768\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_support                   : 138\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_true_positives            : 73.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_f1                  : 0.6765113859605547\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_false_positives     : 73.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_mAP@.5              : 0.6726255456370585\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_mAP@.5:.95          : 0.33299471960598814\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_precision           : 0.829438272059095\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_recall              : 0.5711974110032363\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_support             : 618\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_true_positives      : 353.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_f1              : 0.691299058702087\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_false_positives : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_mAP@.5          : 0.599715078314418\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_mAP@.5:.95      : 0.35756626052676843\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_precision       : 0.9302999916728828\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_recall          : 0.55\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_support         : 20\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_true_positives  : 11.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_f1                        : 0.7527530737229131\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_false_positives           : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_mAP@.5                    : 0.8159078161785783\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_mAP@.5:.95                : 0.472044967244693\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_precision                 : 0.9222847906397232\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_recall                    : 0.6358694031432726\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_support                   : 56\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_true_positives            : 36.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_f1                       : 0.7946624763454027\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_false_positives          : 12.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_mAP@.5                   : 0.7954583843764792\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_mAP@.5:.95               : 0.5585644838936075\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_precision                : 0.9072293912603989\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_recall                   : 0.7069462306410097\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_support                  : 166\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_true_positives           : 117.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_f1                         : 0.7723349667035015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_false_positives            : 38.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_mAP@.5                     : 0.8032122481800095\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_mAP@.5:.95                 : 0.5760075517147492\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_precision                  : 0.8861060951605311\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_recall                     : 0.6844547563805105\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_support                    : 431\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_true_positives             : 295.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [2610]                    : (0.5393362045288086, 2.199005603790283)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [200]          : (0.32887387847193117, 0.7625208976023954)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [200]     : (0.17799965039069116, 0.4557036851603722)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [200]        : (0.49816222027657736, 0.9058854612152105)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [200]           : (0.340849863378305, 0.675509297835707)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [200]           : (0.02358825132250786, 0.03889595344662666)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [200]           : (0.0018924119649454951, 0.007611403241753578)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [200]           : (0.020965153351426125, 0.03515535965561867)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [200]             : (0.032952893525362015, 0.051844581961631775)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [200]             : (0.005537053104490042, 0.017977764829993248)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [200]             : (0.050678715109825134, 0.07800142467021942)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [200]                    : (0.0002980000000000002, 0.07011450381679389)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [200]                    : (0.0002980000000000002, 0.009789529262086514)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [200]                    : (0.0002980000000000002, 0.009789529262086514)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/yolov5/6947081a572a4e938d1946fdca546c21\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.05000000000000001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/exp82\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (1.89 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     model graph                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Please wait for assets to finish uploading (timeout is 10800 seconds)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m All assets have been sent, waiting for delivery confirmation\n"
     ]
    }
   ],
   "source": [
    "# Continual-learning fine-tune on KITTI with EWC regularization\n",
    "# (starts from the fog_02 checkpoint, penalizes drift via fisher.pt).\n",
    "# NOTE(review): the captured output above is stale — it came from a\n",
    "# train_Lwf run with different arguments; re-run this cell to refresh.\n",
    "# Plain string: no placeholders, so the f-prefix was unnecessary.\n",
    "command = \"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_SI.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_kitti.yaml \\\n",
    "--data data/kitti.yaml \\\n",
    "--epochs 100 \\\n",
    "--weights ./runs/train/fog_02/weights/best.pt \\\n",
    "--ewc_pt runs/train/fog_02/weights/fisher.pt \\\n",
    "--ewc_lambda 1e-3 \\\n",
    "\"\"\"\n",
    "!{command}\n",
    "# Alternative starting weights: ./runs/train/exp3/weights/best.pt\n",
    "# Huber"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "bb723f04-c692-41cc-aeea-4ec377718f73",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti.yaml, weights=['runs/train/exp82/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 cbe9b398 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.866      0.604       0.73      0.452\n",
      "                   Car       2244       8711      0.936      0.754      0.885      0.633\n",
      "                   Van       2244        861      0.897      0.616      0.748      0.522\n",
      "                 Truck       2244        333      0.948      0.778      0.879      0.616\n",
      "                  Tram       2244        138      0.918      0.566      0.771      0.444\n",
      "            Pedestrian       2244       1286      0.807        0.6      0.677      0.353\n",
      "        Person_sitting       2244         89      0.656      0.551      0.631      0.324\n",
      "               Cyclist       2244        496      0.916      0.419      0.575      0.325\n",
      "                  Misc       2244        284      0.853      0.553      0.676      0.402\n",
      "Speed: 0.0ms pre-process, 1.0ms inference, 0.8ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp77\u001b[0m\n",
      "Test set val successfully!\n"
     ]
    }
   ],
   "source": [
    "# Evaluate a trained checkpoint on the KITTI test split via val.py.\n",
    "# Plain string: no placeholders, so the f-prefix was unnecessary.\n",
    "model = 'runs/train/exp82/weights/last.pt'\n",
    "\n",
    "# f-string is needed here: {model} is interpolated into the command.\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n",
    "# This run is without EWC\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "019593f9-84f2-4def-b989-213fda10b665",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2bbadd81-f3cc-4119-9ab8-5fef9a7ac161",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "c677ae08-0407-4836-9257-b6b728afd1cd",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_SI: \u001b[0mweights=./runs/train/fog_02/weights/best.pt, cfg=models/yolov5s_kitti.yaml, data=data/kitti.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=100, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=True, SI_pt=./runs/train/fog_02/weights/si.pt, SI_lambda=0.1\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2882 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 68de71e8 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/e52a7c5443f845e0a302bcd20cc97984\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo.Detect                      [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_kitti summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "Transferred 348/349 items from runs/train/fog_02/weights/best.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/train.cache... 4189 image\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/val.cache... 1048 images, 0\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.81 anchors/target, 0.999 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/exp61/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/exp61\u001b[0m\n",
      "Starting training for 100 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/99      3.61G    0.03517    0.03438   0.006924        128        640: 1\n",
      "tensor([0.93198], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00294], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.704      0.395      0.458      0.266\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/99      3.61G    0.03412    0.03082   0.005435        133        640: 1\n",
      "tensor([0.95803], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00405], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.797      0.551      0.612      0.346\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/99      3.61G    0.03662    0.03327   0.006654        131        640: 1\n",
      "tensor([0.98391], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00403], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.718      0.444      0.516      0.272\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/99      3.61G    0.03844    0.03457    0.00707        108        640: 1\n",
      "tensor([0.87680], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00842], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675        0.7      0.367      0.433      0.228\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/99      3.61G    0.03792    0.03307   0.006208        156        640: 1\n",
      "tensor([0.95830], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01193], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.765      0.553       0.65      0.364\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/99      3.61G    0.03733    0.03236   0.005664        123        640: 1\n",
      "tensor([0.90361], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01353], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.762      0.543      0.625      0.311\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/99      3.61G    0.03623    0.03171    0.00548        174        640: 1\n",
      "tensor([1.04658], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01427], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.754      0.532        0.6      0.308\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/99      3.61G    0.03635    0.03112   0.005336        166        640: 1\n",
      "tensor([1.06746], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01543], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.782      0.561      0.654       0.37\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/99      3.61G    0.03539    0.03079   0.004632        152        640: 1\n",
      "tensor([0.97861], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01559], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.747      0.599      0.678       0.39\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/99      3.61G    0.03495    0.03045   0.004671        136        640: 1\n",
      "tensor([0.91061], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01539], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.741      0.554       0.62      0.337\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/99      3.61G    0.03483    0.03029   0.004544        134        640: 1\n",
      "tensor([0.88772], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01555], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.752      0.606      0.678      0.386\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/99      3.61G    0.03452    0.02972   0.004444        182        640: 1\n",
      "tensor([0.94410], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01565], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.743      0.568      0.651      0.361\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/99      3.61G    0.03426    0.03013   0.004333        128        640: 1\n",
      "tensor([0.79674], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01593], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.82      0.598      0.698      0.405\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/99      3.61G    0.03391    0.02948   0.004273        112        640: 1\n",
      "tensor([0.91016], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01645], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.79      0.586      0.673      0.374\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/99      3.61G    0.03358    0.02964   0.004223        151        640: 1\n",
      "tensor([0.82914], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01689], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.746      0.627      0.696      0.408\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/99      3.61G    0.03338    0.02933   0.003983        132        640: 1\n",
      "tensor([0.85211], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01691], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.78      0.643      0.708      0.404\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/99      3.61G    0.03304    0.02887   0.003958        131        640: 1\n",
      "tensor([0.81250], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01685], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.825      0.641      0.722      0.413\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/99      3.61G    0.03324    0.02895    0.00402        159        640: 1\n",
      "tensor([0.98555], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01709], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.831      0.606      0.707      0.415\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/99      3.61G    0.03298     0.0288   0.003956        125        640: 1\n",
      "tensor([0.76904], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01738], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.783      0.613      0.688      0.378\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/99      3.61G    0.03286      0.029   0.003946         88        640: 1\n",
      "tensor([0.71702], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01748], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.749      0.615      0.677      0.392\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/99      3.61G    0.03223    0.02794   0.003792        137        640: 1\n",
      "tensor([0.95737], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01748], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.853      0.612      0.709      0.404\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/99      3.61G    0.03213    0.02843   0.003657        166        640: 1\n",
      "tensor([0.95309], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01728], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.844      0.601      0.701      0.406\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/99      3.61G    0.03188    0.02804   0.003566        161        640: 1\n",
      "tensor([0.88974], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01729], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.821      0.597      0.691      0.402\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/99      3.61G    0.03195    0.02778   0.003624        118        640: 1\n",
      "tensor([0.79463], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01735], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.798      0.635      0.713      0.412\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/99      3.61G    0.03157     0.0277   0.003591        151        640: 1\n",
      "tensor([0.86290], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01746], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.764      0.619      0.689      0.395\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/99      3.61G    0.03135    0.02774   0.003428        133        640: 1\n",
      "tensor([0.79753], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01735], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.835      0.644      0.731      0.405\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/99      3.61G    0.03146     0.0276   0.003402        154        640: 1\n",
      "tensor([0.94800], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01750], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.801      0.605      0.693      0.393\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/99      3.61G    0.03101    0.02758   0.003511        122        640: 1\n",
      "tensor([0.80197], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01759], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.844      0.624      0.719      0.428\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/99      3.61G     0.0311    0.02743   0.003266        123        640: 1\n",
      "tensor([0.67051], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01759], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.872      0.616      0.714      0.418\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/99      3.61G     0.0309    0.02703   0.003288        127        640: 1\n",
      "tensor([0.68938], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01758], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.83      0.561      0.662       0.39\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/99      3.61G    0.03045    0.02621   0.003276        127        640: 1\n",
      "tensor([0.71381], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01753], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.817      0.635      0.718      0.427\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/99      3.61G    0.03084    0.02689   0.003341        122        640: 1\n",
      "tensor([0.80890], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01761], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.821      0.582      0.666       0.39\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/99      3.61G    0.03055    0.02709   0.003212        146        640: 1\n",
      "tensor([0.86082], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01771], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.788      0.534       0.64      0.367\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/99      3.61G    0.03003    0.02632   0.003169        202        640: 1\n",
      "tensor([0.93971], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01768], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.817      0.626      0.701      0.409\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/99      3.61G    0.03011    0.02626   0.003156         94        640: 1\n",
      "tensor([0.64026], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01758], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.802        0.6      0.698      0.414\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/99      3.61G    0.03005    0.02632   0.003095        152        640: 1\n",
      "tensor([0.86042], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01757], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.845      0.664      0.755       0.45\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/99      3.61G    0.02957    0.02608   0.003134        123        640: 1\n",
      "tensor([0.73165], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01759], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.813      0.614      0.709      0.409\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/99      3.61G     0.0299    0.02637   0.003201        162        640: 1\n",
      "tensor([0.76949], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01765], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.812      0.633      0.711      0.425\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/99      3.61G    0.02984    0.02647   0.003129        161        640: 1\n",
      "tensor([0.81941], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01766], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.864      0.625      0.732      0.437\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/99      3.61G    0.02963     0.0262    0.00293        122        640: 1\n",
      "tensor([0.69237], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01763], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.838      0.639      0.724      0.423\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/99      3.61G    0.02935    0.02583   0.003121        126        640: 1\n",
      "tensor([0.69184], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01761], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.817      0.599      0.696      0.412\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/99      3.61G    0.02917    0.02591   0.003061         90        640: 1\n",
      "tensor([0.65803], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01770], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.813      0.652      0.726      0.433\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/99      3.61G    0.02898    0.02566   0.002893        118        640: 1\n",
      "tensor([0.75611], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01765], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.876       0.63       0.73      0.438\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/99      3.61G    0.02895    0.02577    0.00276        157        640: 1\n",
      "tensor([0.82283], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01765], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.839      0.597      0.715      0.426\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/99      3.61G    0.02898     0.0257   0.002877        104        640: 1\n",
      "tensor([0.59238], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01769], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.852      0.632      0.734      0.439\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/99      3.61G     0.0288    0.02592   0.002729        157        640: 1\n",
      "tensor([0.76100], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01762], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.882      0.603      0.708      0.431\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/99      3.61G    0.02868    0.02524     0.0027        108        640: 1\n",
      "tensor([0.59360], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01760], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.809      0.578       0.66      0.387\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/99      3.61G    0.02866     0.0252   0.002794        159        640: 1\n",
      "tensor([0.75460], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01756], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675        0.8      0.602      0.682        0.4\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/99      3.61G    0.02829    0.02521   0.002779        118        640: 1\n",
      "tensor([0.72345], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01756], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.843      0.607      0.702      0.417\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/99      3.61G    0.02842    0.02547   0.002722        176        640: 1\n",
      "tensor([0.91918], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01754], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.869      0.624      0.739      0.453\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      50/99      3.61G    0.02846    0.02516   0.002847        130        640: 1\n",
      "tensor([0.71409], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01752], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.847      0.599      0.714      0.437\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      51/99      3.61G    0.02795    0.02515   0.002598        178        640: 1\n",
      "tensor([0.88302], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01754], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.801      0.632        0.7      0.424\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      52/99      3.61G    0.02784    0.02471   0.002664        148        640: 1\n",
      "tensor([0.70157], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01746], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.88      0.605      0.721      0.441\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      53/99      3.61G    0.02788    0.02474    0.00261        115        640: 1\n",
      "tensor([0.67795], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01748], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.836      0.591      0.691      0.415\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      54/99      3.61G     0.0277    0.02454    0.00267        124        640: 1\n",
      "tensor([0.65416], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01745], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.844      0.575      0.692      0.428\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      55/99      3.61G     0.0274    0.02423   0.002614        163        640: 1\n",
      "tensor([0.72695], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01741], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.836      0.573      0.683       0.41\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      56/99      3.61G    0.02749     0.0246   0.002553        200        640: 1\n",
      "tensor([0.81116], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01738], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.87       0.63      0.727      0.439\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      57/99      3.61G    0.02728    0.02451   0.002548        141        640: 1\n",
      "tensor([0.68683], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01736], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.85      0.639      0.724      0.448\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      58/99      3.61G    0.02737    0.02444    0.00267        146        640: 1\n",
      "tensor([0.71799], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01733], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.868      0.649       0.75      0.456\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      59/99      3.61G    0.02724     0.0244    0.00259        168        640: 1\n",
      "tensor([0.73476], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01733], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.886      0.618      0.736      0.459\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      60/99      3.61G    0.02701    0.02396   0.002455        175        640: 1\n",
      "tensor([0.78584], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01730], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.869        0.6      0.707      0.429\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      61/99      3.61G    0.02679    0.02421   0.002606        139        640: 1\n",
      "tensor([0.77938], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01727], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.84      0.616      0.706      0.434\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      62/99      3.61G    0.02678    0.02343   0.002465        117        640: 1\n",
      "tensor([0.63835], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01727], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.85      0.595      0.693      0.429\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      63/99      3.61G    0.02681    0.02373   0.002431        129        640: 1\n",
      "tensor([0.69148], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01724], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.86      0.612      0.718       0.45\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      64/99      3.61G    0.02665    0.02381   0.002511        109        640: 1\n",
      "tensor([0.63812], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01722], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.876      0.624      0.728      0.455\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      65/99      3.61G    0.02667    0.02392   0.002359        154        640: 1\n",
      "tensor([0.80061], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01721], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.88       0.61      0.719      0.439\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      66/99      3.61G    0.02634    0.02365   0.002404        119        640: 1\n",
      "tensor([0.66728], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01718], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.905      0.587      0.717      0.441\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      67/99      3.61G    0.02628    0.02312   0.002406        153        640: 1\n",
      "tensor([0.75115], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01714], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.868      0.583      0.689      0.425\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      68/99      3.61G     0.0262    0.02361   0.002371        116        640: 1\n",
      "tensor([0.61316], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01710], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.889      0.616      0.739      0.451\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      69/99      3.61G     0.0259    0.02294   0.002368        141        640: 1\n",
      "tensor([0.75425], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01707], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.87      0.594      0.704      0.441\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      70/99      3.61G    0.02597    0.02323   0.002357        175        640: 1\n",
      "tensor([0.84175], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01705], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.848      0.614      0.711      0.432\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      71/99      3.61G    0.02612    0.02328   0.002388        161        640: 1\n",
      "tensor([0.74285], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01703], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.849      0.605      0.708      0.436\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      72/99      3.61G    0.02581     0.0229   0.002243        114        640: 1\n",
      "tensor([0.64168], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01700], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.869      0.632      0.718       0.45\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      73/99      3.61G      0.026     0.0234   0.002362        141        640: 1\n",
      "tensor([0.71527], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01698], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.879      0.596      0.711      0.444\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      74/99      3.61G    0.02576    0.02319   0.002259        133        640: 1\n",
      "tensor([0.61010], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01695], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.792      0.649      0.718      0.448\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      75/99      3.61G    0.02549    0.02274   0.002214        159        640: 1\n",
      "tensor([0.76867], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01692], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.849      0.599      0.714      0.442\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      76/99      3.61G    0.02567    0.02273   0.002325        122        640: 1\n",
      "tensor([0.59046], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01689], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.804      0.634      0.715      0.444\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      77/99      3.61G    0.02564    0.02301   0.002229        137        640: 1\n",
      "tensor([0.68773], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01686], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.873      0.587      0.704       0.44\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      78/99      3.61G    0.02553    0.02254    0.00223        137        640: 1\n",
      "tensor([0.66111], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01683], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.78       0.63      0.708      0.435\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      79/99      3.61G    0.02496    0.02208   0.002233        161        640: 1\n",
      "tensor([0.74742], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01681], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.858      0.599      0.699       0.44\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      80/99      3.61G    0.02518    0.02258   0.002182        154        640: 1\n",
      "tensor([0.64623], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01677], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.883      0.599      0.704      0.446\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      81/99      3.61G    0.02504    0.02227   0.002153        181        640: 1\n",
      "tensor([0.75010], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01674], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675        0.9      0.594      0.714      0.453\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      82/99      3.61G    0.02492    0.02216   0.002162        149        640: 1\n",
      "tensor([0.64628], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01670], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.908      0.587      0.704       0.44\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      83/99      3.61G    0.02481    0.02222   0.002081        118        640: 1\n",
      "tensor([0.61015], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01667], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.879      0.633       0.74      0.467\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      84/99      3.61G    0.02467    0.02199    0.00211        178        640: 1\n",
      "tensor([0.75927], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01664], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.883      0.616      0.712      0.447\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      85/99      3.61G    0.02461    0.02204    0.00207        140        640: 1\n",
      "tensor([0.67576], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01662], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.863      0.633      0.738      0.467\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      86/99      3.61G    0.02467    0.02205   0.002153        119        640: 1\n",
      "tensor([0.53692], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01661], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.891      0.628       0.73      0.467\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      87/99      3.61G    0.02456    0.02217   0.002056        114        640: 1\n",
      "tensor([0.51318], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01659], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.891      0.618      0.718      0.451\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      88/99      3.61G    0.02463    0.02182   0.002139        117        640: 1\n",
      "tensor([0.55775], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01656], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.91      0.619      0.729      0.458\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      89/99      3.61G    0.02448    0.02197   0.002035        118        640: 1\n",
      "tensor([0.57162], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01654], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.872      0.628       0.72      0.456\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      90/99      3.61G    0.02436    0.02159   0.001994        115        640: 1\n",
      "tensor([0.59486], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01652], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.837      0.634       0.72      0.449\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      91/99      3.61G    0.02411    0.02132   0.002002        159        640: 1\n",
      "tensor([0.74213], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01650], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.892      0.612      0.717      0.452\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      92/99      3.61G    0.02415    0.02159   0.002028        165        640: 1\n",
      "tensor([0.69131], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01649], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.912      0.602      0.723      0.455\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      93/99      3.61G    0.02402    0.02125    0.00203        126        640: 1\n",
      "tensor([0.59678], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01647], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.877      0.607       0.71      0.449\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      94/99      3.61G    0.02394    0.02125   0.002043        112        640: 1\n",
      "tensor([0.57474], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01645], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.912      0.596      0.712      0.449\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      95/99      3.61G    0.02373    0.02112   0.001906        121        640: 1\n",
      "tensor([0.59475], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01644], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.845      0.609      0.705      0.446\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      96/99      3.61G    0.02398    0.02103   0.002008        195        640: 1\n",
      "tensor([0.65358], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01642], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.867      0.599      0.706      0.447\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      97/99      3.61G    0.02396    0.02118   0.001957        101        640: 1\n",
      "tensor([0.58631], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01641], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.87      0.605      0.715      0.454\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      98/99      3.61G    0.02367    0.02101    0.00201        137        640: 1\n",
      "tensor([0.58534], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01640], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.864      0.606      0.707       0.45\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      99/99      3.61G    0.02375    0.02118   0.001927        115        640: 1\n",
      "tensor([0.50448], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.01639], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.861      0.607      0.707      0.453\n",
      "\n",
      "100 epochs completed in 1.576 hours.\n",
      "Optimizer stripped from runs/train/exp61/weights/last.pt, 14.3MB\n",
      "Optimizer stripped from runs/train/exp61/weights/best.pt, 14.3MB\n",
      "\n",
      "Validating runs/train/exp61/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.863      0.634      0.739      0.468\n",
      "                   Car       1048       4012      0.911      0.809        0.9      0.648\n",
      "                   Van       1048        431      0.867       0.74       0.83      0.583\n",
      "                 Truck       1048        166      0.889      0.777       0.86      0.614\n",
      "                  Tram       1048         56       0.95      0.685      0.804      0.466\n",
      "            Pedestrian       1048        618       0.82      0.546      0.665      0.333\n",
      "        Person_sitting       1048         20      0.841      0.531      0.614      0.381\n",
      "               Cyclist       1048        234      0.879      0.474      0.609      0.331\n",
      "                  Misc       1048        138      0.746      0.507      0.627      0.384\n",
      "Results saved to \u001b[1mruns/train/exp61\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/e52a7c5443f845e0a302bcd20cc97984\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_f1                         : 0.857053668531672\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_false_positives            : 318.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_mAP@.5                     : 0.9001832357969043\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_mAP@.5:.95                 : 0.6477714354502833\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_precision                  : 0.9107683327488474\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_recall                     : 0.809322033898305\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_support                    : 4012\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_true_positives             : 3247.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_f1                     : 0.6162296906258733\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_false_positives        : 15.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_mAP@.5                 : 0.6094089589951882\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_mAP@.5:.95             : 0.3310813611680763\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_precision              : 0.8791711493579643\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_recall                 : 0.47435897435897434\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_support                : 234\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_true_positives         : 111.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_f1                        : 0.6040231486214149\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_false_positives           : 24.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_mAP@.5                    : 0.627427700575553\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_mAP@.5:.95                : 0.3840598624341176\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_precision                 : 0.7464342058828446\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_recall                    : 0.5072463768115942\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_support                   : 138\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_true_positives            : 70.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_f1                  : 0.6555440230895406\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_false_positives     : 74.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_mAP@.5              : 0.6649162084875153\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_mAP@.5:.95          : 0.33262340656494094\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_precision           : 0.8201317016898242\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_recall              : 0.5459752757487385\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_support             : 618\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_true_positives      : 337.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_f1              : 0.6508697036772163\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_false_positives : 2.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_mAP@.5          : 0.6142416156997377\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_mAP@.5:.95      : 0.38129660489868333\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_precision       : 0.8412223512152135\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_recall          : 0.5307671697393325\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_support         : 20\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_true_positives  : 11.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_f1                        : 0.7961483859309665\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_false_positives           : 2.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_mAP@.5                    : 0.8041271926450921\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_mAP@.5:.95                : 0.46624972294575934\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_precision                 : 0.9504360797111474\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_recall                    : 0.6849568814846593\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_support                   : 56\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_true_positives            : 38.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_f1                       : 0.8292193677446783\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_false_positives          : 16.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_mAP@.5                   : 0.8603458641332014\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_mAP@.5:.95               : 0.6143961495923249\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_precision                : 0.8888214984672236\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_recall                   : 0.7771084337349398\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_support                  : 166\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_true_positives           : 129.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_f1                         : 0.7985423916466837\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_false_positives            : 49.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_mAP@.5                     : 0.8299928953334575\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_mAP@.5:.95                 : 0.5829884371077906\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_precision                  : 0.8669521768844732\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_recall                     : 0.740139211136891\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_support                    : 431\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_true_positives             : 319.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [2610]                    : (0.5456262826919556, 2.1990232467651367)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [200]          : (0.43257836839475516, 0.754699320515714)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [200]     : (0.2275864700327992, 0.46740422068093934)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [200]        : (0.7000174873358439, 0.9122690568238594)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [200]           : (0.36696029130259056, 0.6641203383915797)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [200]           : (0.02367316745221615, 0.03843981400132179)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [200]           : (0.0019059672486037016, 0.007069788873195648)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [200]           : (0.021012984216213226, 0.03457426652312279)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [200]             : (0.032885197550058365, 0.045032814145088196)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [200]             : (0.005365967750549316, 0.01567220874130726)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [200]             : (0.05070357024669647, 0.07469940185546875)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [200]                    : (0.0002980000000000002, 0.07011450381679389)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [200]                    : (0.0002980000000000002, 0.009789529262086514)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [200]                    : (0.0002980000000000002, 0.009789529262086514)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/yolov5/e52a7c5443f845e0a302bcd20cc97984\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.05000000000000001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/exp61\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (1.92 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     model graph                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n"
     ]
    }
   ],
   "source": [
    "command = f\"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_SI.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_kitti.yaml \\\n",
    "--data data/kitti.yaml \\\n",
    "--epochs 100 \\\n",
    "--weights ./runs/train/fog_02/weights/best.pt \\\n",
    "--SI_enable \\\n",
    "--SI_pt ./runs/train/fog_02/weights/si.pt \\\n",
    "--SI_lambda 1e-1 \\\n",
    "\"\"\"\n",
    "!{command}\n",
    "# --weights ./runs/train/exp3/weights/best.pt \\\n",
    "# 这个吃完饭回来后再看。\n",
    "# 这个目前是1.0强度增量的最好参数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "2d482fc0-6739-40ea-bf16-4ad67e37b6b6",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti.yaml, weights=['runs/train/exp61/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 68de71e8 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.874       0.57      0.688      0.422\n",
      "                   Car       2244       8711      0.928      0.703      0.833      0.592\n",
      "                   Van       2244        861      0.868      0.617      0.715      0.496\n",
      "                 Truck       2244        333      0.906      0.549      0.664      0.411\n",
      "                  Tram       2244        138       0.86      0.535      0.663      0.362\n",
      "            Pedestrian       2244       1286      0.871      0.579       0.68       0.37\n",
      "        Person_sitting       2244         89      0.794      0.483      0.624      0.347\n",
      "               Cyclist       2244        496      0.878      0.481      0.594      0.337\n",
      "                  Misc       2244        284      0.883      0.613       0.73       0.46\n",
      "Speed: 0.1ms pre-process, 0.9ms inference, 1.1ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp70\u001b[0m\n",
      "Test set val successfully!\n"
     ]
    }
   ],
   "source": [
    "# 这是无雾训练集\n",
    "model = f'runs/train/exp61/weights/last.pt'\n",
    "\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "70f7c443-86fe-445c-a9ee-8da99a468960",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "325ade44-0214-4fa6-a5e6-d88e116c69b2",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "aecda858-5ef5-40b3-a381-ce5a403c19ac",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e5312877-0152-4437-83a6-a6433502e785",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "10f1852a-841e-4f62-91e2-e034eabb3a1e",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6bd2460b-c695-408f-952c-3e3480b41783",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "0e5064e5-35eb-4e33-a975-9e7b872e02d7",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_SI: \u001b[0mweights=./runs/train/fog_02/weights/best.pt, cfg=models/yolov5s_kitti.yaml, data=data/kitti.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=60, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0\n",
      "Command 'git fetch ultralytics' timed out after 5 seconds\n",
      "YOLOv5 🚀 68de71e8 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/e6909282434d4b06991fe309bc4955b2\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo.Detect                      [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_kitti summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "Transferred 348/349 items from runs/train/fog_02/weights/best.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/train... 4189 images, 0 b\u001b[0m\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /root/autodl-tmp/datasets/kitti/labels/train.cache\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/val.cache... 1048 images, 0\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.81 anchors/target, 0.999 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/exp52/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/exp52\u001b[0m\n",
      "Starting training for 60 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/59      3.65G    0.02811    0.02799    0.00323        167        640:  error: RPC failed; curl 16 Error in the HTTP2 framing layer\n",
      "fatal: expected flush after ref listing\n",
      "       0/59      3.65G    0.02782    0.02521   0.002641        128        640: 1\n",
      "tensor([0.71809], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.868      0.605      0.711       0.44\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/59      3.65G    0.03052    0.02497   0.002621        133        640: 1\n",
      "tensor([0.82617], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.882       0.71      0.793      0.478\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/59      3.65G    0.03356    0.02688   0.003171        131        640: 1\n",
      "tensor([0.79908], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.841      0.653      0.741       0.44\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/59      3.65G    0.03415    0.02834   0.003714        108        640: 1\n",
      "tensor([0.78440], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.829      0.714      0.783      0.478\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/59      3.65G    0.03404    0.02846   0.004006        156        640: 1\n",
      "tensor([0.83709], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.869      0.709      0.801      0.482\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/59      3.65G    0.03328    0.02834   0.004058        123        640: 1\n",
      "tensor([0.77020], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.875      0.668      0.778      0.487\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/59      3.65G    0.03265    0.02777   0.003792        174        640: 1\n",
      "tensor([0.92278], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.842      0.752      0.806      0.496\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/59      3.65G    0.03247    0.02742   0.003867        166        640: 1\n",
      "tensor([0.98560], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.867      0.752      0.829      0.518\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/59      3.65G    0.03189     0.0277   0.003619        152        640: 1\n",
      "tensor([0.84464], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.848      0.751      0.815      0.504\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/59      3.65G    0.03183    0.02739    0.00372        136        640: 1\n",
      "tensor([0.81161], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.87      0.736      0.821      0.523\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/59      3.65G    0.03156    0.02713   0.003502        134        640: 1\n",
      "tensor([0.77102], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.879      0.755      0.832      0.526\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/59      3.65G    0.03085    0.02665   0.003466        182        640: 1\n",
      "tensor([0.86228], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.896      0.723      0.816      0.515\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/59      3.65G    0.03086    0.02695   0.003353        128        640: 1\n",
      "tensor([0.68514], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.896      0.753      0.834      0.534\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/59      3.65G    0.03023     0.0262   0.003255        112        640: 1\n",
      "tensor([0.80344], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.893       0.75       0.83      0.529\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/59      3.65G    0.03026    0.02634   0.003227        151        640: 1\n",
      "tensor([0.75301], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.885       0.77       0.84      0.546\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/59      3.65G    0.03007    0.02639   0.003203        132        640: 1\n",
      "tensor([0.75699], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.875      0.778      0.841      0.542\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/59      3.65G    0.02964    0.02598   0.003178        131        640: 1\n",
      "tensor([0.73440], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.872      0.771      0.845      0.539\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/59      3.65G    0.02982    0.02588   0.003193        159        640: 1\n",
      "tensor([0.85475], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.886       0.75      0.831      0.544\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/59      3.65G    0.02942    0.02559   0.003109        125        640: 1\n",
      "tensor([0.66618], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.872       0.73      0.808      0.517\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/59      3.65G    0.02934    0.02581   0.003053         88        640: 1\n",
      "tensor([0.57239], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.886      0.768      0.834       0.54\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/59      3.65G    0.02885    0.02507   0.003103        137        640: 1\n",
      "tensor([0.83614], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.897      0.734      0.817      0.529\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/59      3.65G    0.02883    0.02551   0.002967        166        640: 1\n",
      "tensor([0.80130], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.914      0.754      0.836      0.552\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/59      3.65G    0.02839    0.02506   0.002852        161        640: 1\n",
      "tensor([0.79560], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.907      0.763      0.843      0.559\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/59      3.65G    0.02829    0.02474    0.00289        118        640: 1\n",
      "tensor([0.66187], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.897      0.787      0.855      0.575\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/59      3.65G    0.02799    0.02473   0.002807        151        640: 1\n",
      "tensor([0.76147], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.913      0.752      0.845      0.561\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/59      3.65G    0.02791    0.02476   0.002766        133        640: 1\n",
      "tensor([0.69878], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.907      0.752      0.831      0.549\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/59      3.65G    0.02749    0.02438   0.002614        154        640: 1\n",
      "tensor([0.82199], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.898      0.779      0.844      0.562\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/59      3.65G    0.02782    0.02464   0.002777        122        640: 1\n",
      "tensor([0.70743], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.91      0.768      0.839      0.565\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/59      3.65G    0.02737    0.02438    0.00268        123        640: 1\n",
      "tensor([0.58029], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.894      0.784       0.85      0.572\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/59      3.65G    0.02723    0.02398   0.002615        127        640: 1\n",
      "tensor([0.59787], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.883      0.773      0.841      0.561\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/59      3.65G    0.02694    0.02318    0.00262        127        640: 1\n",
      "tensor([0.61050], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.915      0.747      0.831      0.565\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/59      3.65G    0.02685    0.02374   0.002596        122        640: 1\n",
      "tensor([0.68287], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.881      0.799      0.852      0.576\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/59      3.65G     0.0268    0.02386   0.002529        146        640: 1\n",
      "tensor([0.73382], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.863      0.773      0.835      0.553\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/59      3.65G    0.02655     0.0233   0.002506        202        640: 1\n",
      "tensor([0.80524], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.919      0.782      0.857       0.59\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/59      3.65G    0.02632    0.02309   0.002453         94        640: 1\n",
      "tensor([0.53475], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.884      0.783      0.849      0.575\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/59      3.65G    0.02624    0.02316   0.002433        152        640: 1\n",
      "tensor([0.75217], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.929      0.769      0.857      0.581\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/59      3.65G    0.02591    0.02284   0.002425        123        640: 1\n",
      "tensor([0.60087], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.901      0.772       0.85      0.574\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/59      3.65G    0.02575    0.02289   0.002433        162        640: 1\n",
      "tensor([0.65872], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.892       0.77      0.849      0.579\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/59      3.65G    0.02582     0.0231   0.002408        161        640: 1\n",
      "tensor([0.65679], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.904      0.776      0.847      0.576\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/59      3.65G    0.02553    0.02282   0.002374        122        640: 1\n",
      "tensor([0.60730], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.898      0.792      0.855      0.576\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/59      3.65G     0.0255    0.02247   0.002412        126        640: 1\n",
      "tensor([0.58129], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.875      0.787      0.846      0.581\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/59      3.65G    0.02518    0.02246   0.002349         90        640: 1\n",
      "tensor([0.55112], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.91      0.792      0.856      0.583\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/59      3.65G    0.02499    0.02228   0.002207        118        640: 1\n",
      "tensor([0.62059], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.908      0.767      0.854      0.589\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/59      3.65G    0.02491    0.02226   0.002116        157        640: 1\n",
      "tensor([0.68957], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.89      0.769      0.845       0.58\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/59      3.65G    0.02474    0.02211   0.002186        104        640: 1\n",
      "tensor([0.47904], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.911       0.76      0.849      0.588\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/59      3.65G    0.02484     0.0224   0.002078        157        640: 1\n",
      "tensor([0.64838], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.892      0.782      0.845      0.574\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/59      3.65G    0.02459    0.02173   0.002059        108        640: 1\n",
      "tensor([0.48611], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.917      0.772      0.851      0.588\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/59      3.65G    0.02451    0.02169    0.00216        159        640: 1\n",
      "tensor([0.63727], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.907      0.775      0.852      0.575\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/59      3.65G    0.02421    0.02152   0.002129        118        640: 1\n",
      "tensor([0.56140], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.907        0.8      0.858      0.589\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/59      3.65G     0.0243     0.0219   0.002085        176        640: 1\n",
      "tensor([0.75549], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.898      0.776      0.854      0.586\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      50/59      3.65G    0.02426    0.02147    0.00209        130        640: 1\n",
      "tensor([0.59769], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.903      0.781      0.848      0.581\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      51/59      3.65G    0.02394    0.02159   0.001988        178        640: 1\n",
      "tensor([0.75290], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.896      0.783       0.85      0.594\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      52/59      3.65G    0.02381    0.02123   0.001994        148        640: 1\n",
      "tensor([0.62173], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.904       0.77      0.854      0.593\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      53/59      3.65G    0.02373    0.02113   0.001963        115        640: 1\n",
      "tensor([0.55760], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.901      0.784       0.85      0.594\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      54/59      3.65G    0.02368    0.02092    0.00201        124        640: 1\n",
      "tensor([0.50928], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.904      0.788      0.859      0.597\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      55/59      3.65G    0.02338    0.02062    0.00198        163        640: 1\n",
      "tensor([0.61739], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.906      0.774      0.852      0.598\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      56/59      3.65G    0.02334    0.02094   0.001949        200        640: 1\n",
      "tensor([0.66725], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.898      0.784      0.856        0.6\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      57/59      3.65G    0.02334    0.02086   0.001954        141        640: 1\n",
      "tensor([0.59996], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.907      0.773      0.852      0.596\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      58/59      3.65G    0.02341    0.02086    0.00202        146        640: 1\n",
      "tensor([0.58103], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.918      0.781      0.862      0.605\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      59/59      3.65G    0.02323    0.02064    0.00194        168        640: 1\n",
      "tensor([0.62698], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.885      0.798      0.859      0.605\n",
      "\n",
      "60 epochs completed in 0.889 hours.\n",
      "Optimizer stripped from runs/train/exp52/weights/last.pt, 14.3MB\n",
      "Optimizer stripped from runs/train/exp52/weights/best.pt, 14.3MB\n",
      "\n",
      "Validating runs/train/exp52/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.919      0.781      0.862      0.605\n",
      "                   Car       1048       4012      0.926       0.91      0.957      0.757\n",
      "                   Van       1048        431       0.95      0.903      0.953      0.753\n",
      "                 Truck       1048        166      0.967      0.916      0.967       0.76\n",
      "                  Tram       1048         56      0.945      0.919      0.951       0.71\n",
      "            Pedestrian       1048        618      0.894      0.682      0.808      0.413\n",
      "        Person_sitting       1048         20      0.847        0.5      0.614      0.361\n",
      "               Cyclist       1048        234      0.935      0.688      0.823      0.508\n",
      "                  Misc       1048        138      0.886      0.732      0.825      0.575\n",
      "Results saved to \u001b[1mruns/train/exp52\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/e6909282434d4b06991fe309bc4955b2\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_f1                         : 0.9175864059751682\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_false_positives            : 292.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_mAP@.5                     : 0.956816714927133\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_mAP@.5:.95                 : 0.7568642580852878\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_precision                  : 0.9257956843813218\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_recall                     : 0.9095214356929212\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_support                    : 4012\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_true_positives             : 3649.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_f1                     : 0.7927850886373997\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_false_positives        : 11.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_mAP@.5                 : 0.8229552700451442\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_mAP@.5:.95             : 0.5081498595652474\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_precision              : 0.9351600783020777\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_recall                 : 0.688034188034188\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_support                : 234\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_true_positives         : 161.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_f1                        : 0.8014979713448022\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_false_positives           : 13.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_mAP@.5                    : 0.8249802494639601\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_mAP@.5:.95                : 0.5748729708295898\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_precision                 : 0.8859459715887289\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_recall                    : 0.7317480524002263\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_support                   : 138\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_true_positives            : 101.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_f1                  : 0.7737000893839502\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_false_positives     : 50.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_mAP@.5              : 0.8081717248907703\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_mAP@.5:.95          : 0.41330546064607515\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_precision           : 0.8939454924174597\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_recall              : 0.6819679712074533\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_support             : 618\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_true_positives      : 421.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_f1              : 0.6287560466664438\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_false_positives : 2.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_mAP@.5          : 0.6140448353317145\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_mAP@.5:.95      : 0.3611798802484437\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_precision       : 0.8468232829391262\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_recall          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_support         : 20\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_true_positives  : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_f1                        : 0.9319504435412155\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_false_positives           : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_mAP@.5                    : 0.950896145181106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_mAP@.5:.95                : 0.709904995433041\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_precision                 : 0.9449313108962232\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_recall                    : 0.9193213890582311\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_support                   : 56\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_true_positives            : 51.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_f1                       : 0.940434554002631\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_false_positives          : 5.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_mAP@.5                   : 0.9666276617225005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_mAP@.5:.95               : 0.7601334297138441\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_precision                : 0.9665840611101464\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_recall                   : 0.9156626506024096\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_support                  : 166\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_true_positives           : 152.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_f1                         : 0.9258659961145181\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_false_positives            : 20.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_mAP@.5                     : 0.9533928625663481\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_mAP@.5:.95                 : 0.7532178621236112\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_precision                  : 0.9504161600923483\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_recall                     : 0.9025522041763341\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_support                    : 431\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_true_positives             : 389.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [1566]                    : (0.537574827671051, 1.3534386157989502)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [120]          : (0.7110311872186416, 0.8621274672598453)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [120]     : (0.43957089214425155, 0.6051965375645663)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [120]        : (0.8286257543093856, 0.929395820430607)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [120]           : (0.6047638837410217, 0.7998035029506525)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [120]           : (0.02323015034198761, 0.034154411405324936)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [120]           : (0.0019403393380343914, 0.004057859070599079)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [120]           : (0.020622603595256805, 0.02846064418554306)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [120]             : (0.02657506801187992, 0.035512734204530716)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [120]             : (0.003500107442960143, 0.007941304706037045)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [120]             : (0.042941123247146606, 0.05593976005911827)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [120]                    : (0.00043, 0.07011450381679389)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [120]                    : (0.00043, 0.009657697201017812)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [120]                    : (0.00043, 0.009657697201017812)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/yolov5/e6909282434d4b06991fe309bc4955b2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.05000000000000001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/exp52\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (1.86 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     model graph                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n"
     ]
    }
   ],
   "source": [
    "# Baseline continual-learning run: fine-tune fog-trained weights on KITTI, no SI regularization.\n",
    "# NOTE(review): the stored output above is stale — it was produced by a train_Lwf run with\n",
    "# different cfg/data/epochs (see the printed arg line); re-run this cell to refresh it.\n",
    "command = \"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_SI.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_kitti.yaml \\\n",
    "--data data/kitti.yaml \\\n",
    "--epochs 60 \\\n",
    "--weights ./runs/train/fog_02/weights/best.pt \\\n",
    "\"\"\"\n",
    "!{command}\n",
    "# Alternative starting checkpoint: --weights ./runs/train/exp3/weights/best.pt\n",
    "# 0.6  <- presumably a previous reference mAP@0.5 — TODO confirm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5913bfaa-c26b-4a6e-bcb1-32a93ddd08d9",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "298633b9-69f1-4da4-a71e-3e5b61247a40",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_SI: \u001b[0mweights=./runs/train/fog_02/weights/best.pt, cfg=models/yolov5s_kitti.yaml, data=data/kitti.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=60, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=True, SI_pt=./runs/train/fog_02/weights/si.pt, SI_lambda=0.01\n",
      "Command 'git fetch ultralytics' timed out after 5 seconds\n",
      "YOLOv5 🚀 68de71e8 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/f27e1f050cfb481eb02eedcef46571bc\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo.Detect                      [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_kitti summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "Transferred 348/349 items from runs/train/fog_02/weights/best.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/train.cache... 4189 image\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/val.cache... 1048 images, 0\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.81 anchors/target, 0.999 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/exp58/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/exp58\u001b[0m\n",
      "Starting training for 60 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/59      3.61G    0.02871    0.02945   0.003626        196        640:  error: RPC failed; curl 16 Error in the HTTP2 framing layer\n",
      "fatal: expected flush after ref listing\n",
      "       0/59      3.61G    0.02786    0.02522   0.002641        128        640: 1\n",
      "tensor([0.71696], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00028], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.865      0.605      0.712      0.444\n",
      "\u001b[1;38;5;214mCOMET WARNING:\u001b[0m Unknown error retrieving Conda package as an explicit file\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/59      3.61G    0.03045    0.02492   0.002613        133        640: 1\n",
      "tensor([0.82687], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00035], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675        0.9      0.718      0.806      0.512\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/59      3.61G    0.03361    0.02685   0.003157        131        640: 1\n",
      "tensor([0.78540], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00021], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.868      0.745      0.822      0.511\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/59      3.61G     0.0342    0.02826   0.003762        108        640: 1\n",
      "tensor([0.76343], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00024], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.843      0.651       0.74      0.438\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/59      3.61G    0.03399    0.02848    0.00404        156        640: 1\n",
      "tensor([0.87230], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00038], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.809      0.711      0.788      0.483\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/59      3.61G     0.0336    0.02842   0.004052        123        640: 1\n",
      "tensor([0.77364], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00050], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.815      0.635      0.741      0.455\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/59      3.61G    0.03291    0.02788   0.003788        174        640: 1\n",
      "tensor([0.91136], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00060], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.868      0.728      0.814        0.5\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/59      3.61G    0.03252    0.02741   0.003852        166        640: 1\n",
      "tensor([1.00496], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00067], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.866       0.74      0.818        0.5\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/59      3.61G    0.03212    0.02777   0.003737        152        640: 1\n",
      "tensor([0.84721], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00074], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.864      0.692      0.798      0.502\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/59      3.61G    0.03176    0.02739   0.003714        136        640: 1\n",
      "tensor([0.78902], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00080], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.843      0.768      0.825      0.497\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/59      3.61G    0.03157    0.02736   0.003636        134        640: 1\n",
      "tensor([0.76725], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00085], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.874      0.744      0.818      0.518\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/59      3.61G    0.03105    0.02672   0.003473        182        640: 1\n",
      "tensor([0.86794], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00089], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.89       0.72      0.804      0.493\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/59      3.61G    0.03072    0.02688   0.003381        128        640: 1\n",
      "tensor([0.68583], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00093], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.899      0.731       0.84      0.539\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/59      3.61G    0.03062    0.02622   0.003243        112        640: 1\n",
      "tensor([0.71890], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00096], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.901      0.749      0.837      0.523\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/59      3.61G    0.02955    0.02641   0.003023        167        640:  ^C\n",
      "      14/59      3.61G    0.02955    0.02641   0.003023        167        640:  \n"
     ]
    }
   ],
   "source": [
    "# SI (Synaptic Intelligence) run: fine-tune fog-trained weights on KITTI with SI\n",
    "# regularization (importance weights loaded from si.pt, lambda = 1e-2).\n",
    "command = \"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_SI.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_kitti.yaml \\\n",
    "--data data/kitti.yaml \\\n",
    "--epochs 60 \\\n",
    "--weights ./runs/train/fog_02/weights/best.pt \\\n",
    "--SI_enable \\\n",
    "--SI_pt ./runs/train/fog_02/weights/si.pt \\\n",
    "--SI_lambda 1e-2 \\\n",
    "\"\"\"\n",
    "!{command}\n",
    "# 0.6  <- presumably a previous reference mAP@0.5 — TODO confirm\n",
    "# TODO(review): training was interrupted at epoch 14 (^C in output); resume or re-run to complete\n",
    "# (original note translated from Chinese: \"come back and check after dinner\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b4ba4ad2-92f7-4bf8-b770-76edda83a663",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "24814311-d0e2-44b1-be99-37200a86dfff",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d55de842-4049-46b9-9bd2-ace72dad244c",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a32d3b00-0449-431c-831d-46eff86bec6a",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7f762b7c-b221-4a4e-aca3-5688da587ea2",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f83732fc-fc64-4fa0-a5a1-66ebbc25227d",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "dfbbc944-5ce3-41eb-a4fe-55869232880c",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_SI: \u001b[0mweights=./runs/train/fog_02/weights/best.pt, cfg=models/yolov5s_kitti.yaml, data=data/kitti.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=60, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 2882 commits. Use 'git pull ultralytics master' or 'git clone https://github.com/ultralytics/yolov5' to update.\n",
      "YOLOv5 🚀 68de71e8 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/8d6a43eedfd646bf8a4865d3c44ae51d\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo.Detect                      [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_kitti summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "Transferred 348/349 items from runs/train/fog_02/weights/best.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/train... 4189 images, 0 b\u001b[0m\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /root/autodl-tmp/datasets/kitti/labels/train.cache\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/val.cache... 1048 images, 0\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.81 anchors/target, 0.999 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/exp17/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/exp17\u001b[0m\n",
      "Starting training for 60 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/59      3.65G    0.03048    0.02902   0.003887        128        640: 1\n",
      "tensor([0.76055], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.741      0.418      0.495       0.29\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/59      3.65G    0.03118    0.02704   0.003313        133        640: 1\n",
      "tensor([0.88110], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.771      0.616      0.679      0.381\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/59      3.65G     0.0342    0.02906   0.003959        131        640: 1\n",
      "tensor([0.91168], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.764      0.639        0.7      0.411\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/59      3.65G    0.03523    0.03127    0.00502        108        640: 1\n",
      "tensor([0.79283], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.753      0.628      0.673       0.38\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/59      3.65G    0.03539    0.03038   0.004802        156        640: 1\n",
      "tensor([0.91785], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.805      0.559       0.67      0.388\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/59      3.65G     0.0347    0.02997   0.004582        123        640: 1\n",
      "tensor([0.79697], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.839      0.634      0.732      0.435\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/59      3.65G    0.03407    0.02907   0.004152        174        640: 1\n",
      "tensor([0.96465], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.847      0.642      0.737      0.418\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/59      3.65G    0.03359    0.02868   0.004201        166        640: 1\n",
      "tensor([1.03558], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.837       0.64      0.749      0.445\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/59      3.65G    0.03306    0.02873   0.003996        152        640: 1\n",
      "tensor([0.87545], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.765      0.616      0.693      0.404\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/59      3.65G    0.03283    0.02856   0.003995        136        640: 1\n",
      "tensor([0.79885], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.841       0.63      0.729      0.421\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/59      3.65G     0.0325    0.02821   0.003809        134        640: 1\n",
      "tensor([0.81600], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.849      0.699      0.785      0.489\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/59      3.65G    0.03208    0.02757   0.003581        182        640: 1\n",
      "tensor([0.86592], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.801      0.716       0.78      0.461\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/59      3.65G    0.03163    0.02784   0.003615        128        640: 1\n",
      "tensor([0.70616], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.891       0.69       0.81      0.502\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/59      3.65G    0.03149    0.02717   0.003526        112        640: 1\n",
      "tensor([0.76241], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.854      0.702        0.8      0.477\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/59      3.65G    0.03085    0.02729   0.003383        151        640: 1\n",
      "tensor([0.77926], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   ^C\n",
      "                 Class     Images  Instances          P          R      mAP50   \n"
     ]
    }
   ],
   "source": [
    "# Build the shell command for a train_SI.py YOLOv5 run (KITTI config),\n",
    "# resuming from the fog_02 best checkpoint; COMET_LOG_PER_CLASS_METRICS\n",
    "# enables per-class metric logging on Comet. Plain string: no f-string\n",
    "# interpolation is needed ({command} below is IPython substitution).\n",
    "command = \"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_SI.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_kitti.yaml \\\n",
    "--data data/kitti.yaml \\\n",
    "--epochs 60 \\\n",
    "--weights ./runs/train/fog_02/weights/best.pt \\\n",
    "\"\"\"\n",
    "!{command}\n",
    "# Alternative starting checkpoint used in earlier runs:\n",
    "# --weights ./runs/train/exp3/weights/best.pt \\\n",
    "# 1.0  (NOTE(review): meaning unclear — possibly a lambda value; confirm)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0e6d2d8c-4704-4aa1-b505-12be8d7fb00f",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "05c9c1ea-b436-4ed9-9bd8-0122490501e7",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7ab8c161-8b4a-45cf-9d75-8a0a38c80805",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "88b5577a-0e5e-4768-b917-8eb4a7f78132",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_SI: \u001b[0mweights=./runs/train/fog_02/weights/best.pt, cfg=models/yolov5s_kitti.yaml, data=data/kitti.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=60, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0\n",
      "Command 'git fetch ultralytics' timed out after 5 seconds\n",
      "YOLOv5 🚀 68de71e8 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "fatal: unable to access 'https://github.com/ultralytics/yolov5/': GnuTLS recv error (-110): The TLS connection was non-properly terminated.\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/7dc450e2525744e0bb96e7313129a666\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo.Detect                      [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_kitti summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "Transferred 348/349 items from runs/train/fog_02/weights/best.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/train... 4189 images, 0 b\u001b[0m\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /root/autodl-tmp/datasets/kitti/labels/train.cache\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/val.cache... 1048 images, 0\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.81 anchors/target, 0.999 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/exp45/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/exp45\u001b[0m\n",
      "Starting training for 60 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/59      3.65G    0.03048    0.02902   0.003887        128        640: 1\n",
      "tensor([0.76055], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.741      0.418      0.495       0.29\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/59      3.65G    0.03118    0.02704   0.003313        133        640: 1\n",
      "tensor([0.88110], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.771      0.616      0.679      0.381\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/59      3.65G     0.0342    0.02906   0.003959        131        640: 1\n",
      "tensor([0.91168], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.764      0.639        0.7      0.411\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/59      3.65G    0.03523    0.03127    0.00502        108        640: 1\n",
      "tensor([0.79283], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.753      0.628      0.673       0.38\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/59      3.65G    0.03539    0.03038   0.004802        156        640: 1\n",
      "tensor([0.91785], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.805      0.559       0.67      0.388\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/59      3.65G     0.0347    0.02997   0.004582        123        640: 1\n",
      "tensor([0.79697], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.839      0.634      0.732      0.435\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/59      3.65G    0.03407    0.02907   0.004152        174        640: 1\n",
      "tensor([0.96465], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.847      0.642      0.737      0.418\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/59      3.65G    0.03359    0.02868   0.004201        166        640: 1\n",
      "tensor([1.03558], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.837       0.64      0.749      0.445\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/59      3.65G    0.03306    0.02873   0.003996        152        640: 1\n",
      "tensor([0.87545], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.765      0.616      0.693      0.404\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/59      3.65G    0.03283    0.02856   0.003995        136        640: 1\n",
      "tensor([0.79885], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.841       0.63      0.729      0.421\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/59      3.65G     0.0325    0.02821   0.003809        134        640: 1\n",
      "tensor([0.81600], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.849      0.699      0.785      0.489\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/59      3.65G    0.03208    0.02757   0.003581        182        640: 1\n",
      "tensor([0.86592], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.801      0.716       0.78      0.461\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/59      3.65G    0.03163    0.02784   0.003615        128        640: 1\n",
      "tensor([0.70616], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.891       0.69       0.81      0.502\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/59      3.65G    0.03149    0.02717   0.003526        112        640: 1\n",
      "tensor([0.76241], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.854      0.702        0.8      0.477\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/59      3.65G    0.03085    0.02729   0.003383        151        640: 1\n",
      "tensor([0.77926], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.871       0.66      0.758      0.468\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/59      3.65G    0.03083    0.02719   0.003434        132        640: 1\n",
      "tensor([0.80073], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.885      0.701      0.785      0.476\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/59      3.65G    0.03054    0.02675   0.003345        131        640: 1\n",
      "tensor([0.71668], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.88      0.723      0.814      0.506\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/59      3.65G    0.03026    0.02654   0.003342        159        640: 1\n",
      "tensor([0.87040], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.858      0.698      0.793      0.487\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/59      3.65G    0.03021    0.02639   0.003313        125        640: 1\n",
      "tensor([0.68028], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.851      0.672      0.778      0.475\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/59      3.65G    0.03006    0.02663   0.003224         88        640: 1\n",
      "tensor([0.60064], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.877      0.711      0.802      0.497\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/59      3.65G    0.02959    0.02572   0.003097        137        640: 1\n",
      "tensor([0.86391], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.877      0.729      0.816      0.526\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/59      3.65G    0.02938    0.02624   0.003098        166        640: 1\n",
      "tensor([0.83961], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.901      0.695      0.799      0.502\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/59      3.65G    0.02914    0.02581    0.00298        161        640: 1\n",
      "tensor([0.82296], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.878      0.692      0.788      0.485\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/59      3.65G    0.02909    0.02549   0.003028        118        640: 1\n",
      "tensor([0.66594], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.913      0.697       0.81      0.514\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/59      3.65G    0.02866     0.0254   0.002909        151        640: 1\n",
      "tensor([0.75156], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.906      0.688      0.786      0.494\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/59      3.65G    0.02862    0.02545   0.002903        133        640: 1\n",
      "tensor([0.70504], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.919      0.704      0.803      0.512\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/59      3.65G     0.0284    0.02512   0.002834        154        640: 1\n",
      "tensor([0.85934], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.883       0.71      0.803      0.517\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/59      3.65G    0.02799     0.0252   0.002917        122        640: 1\n",
      "tensor([0.70289], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.883      0.715      0.792        0.5\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/59      3.65G    0.02813    0.02506   0.002793        123        640: 1\n",
      "tensor([0.60460], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.909      0.704      0.795      0.515\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/59      3.65G    0.02782    0.02463    0.00269        127        640: 1\n",
      "tensor([0.61695], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.895      0.685      0.786      0.502\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/59      3.65G    0.02749    0.02381   0.002679        127        640: 1\n",
      "tensor([0.61998], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.907      0.693      0.794      0.517\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/59      3.65G    0.02753    0.02432   0.002743        122        640: 1\n",
      "tensor([0.73944], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.912      0.691      0.794      0.503\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/59      3.65G    0.02739    0.02457   0.002667        146        640: 1\n",
      "tensor([0.76772], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.869      0.709       0.79      0.498\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/59      3.65G    0.02723    0.02395   0.002621        202        640: 1\n",
      "tensor([0.83607], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.905      0.715       0.81      0.525\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/59      3.65G    0.02688    0.02375    0.00256         94        640: 1\n",
      "tensor([0.53753], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.894      0.704      0.801       0.52\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/59      3.65G     0.0268    0.02371   0.002523        152        640: 1\n",
      "tensor([0.72895], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.913      0.716       0.82      0.528\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/59      3.65G    0.02653    0.02352   0.002513        123        640: 1\n",
      "tensor([0.61158], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.897      0.725       0.81       0.53\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/59      3.65G    0.02639     0.0236    0.00253        162        640: 1\n",
      "tensor([0.69328], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.889      0.736      0.817      0.529\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/59      3.65G    0.02648    0.02377   0.002463        161        640: 1\n",
      "tensor([0.65910], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.871       0.74      0.819      0.532\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/59      3.65G    0.02605    0.02345   0.002424        122        640: 1\n",
      "tensor([0.60966], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.884      0.724      0.815      0.529\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/59      3.65G    0.02603    0.02305   0.002442        126        640: 1\n",
      "tensor([0.59756], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.894      0.695      0.795      0.509\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/59      3.65G    0.02573    0.02309   0.002436         90        640: 1\n",
      "tensor([0.56390], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.932      0.708      0.812      0.531\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/59      3.65G    0.02553    0.02289   0.002281        118        640: 1\n",
      "tensor([0.66124], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.91      0.696      0.811       0.54\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/59      3.65G    0.02544    0.02287   0.002178        157        640: 1\n",
      "tensor([0.70369], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.903      0.722      0.822      0.547\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/59      3.65G    0.02532    0.02271   0.002252        104        640: 1\n",
      "tensor([0.47690], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.906      0.718      0.818      0.536\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/59      3.65G    0.02541    0.02301   0.002168        157        640: 1\n",
      "tensor([0.65350], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.906      0.699      0.805      0.529\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/59      3.65G    0.02517    0.02235   0.002136        108        640: 1\n",
      "tensor([0.49438], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.909      0.709      0.811      0.543\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/59      3.65G    0.02499    0.02227   0.002219        159        640: 1\n",
      "tensor([0.66820], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.928      0.698      0.811      0.532\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/59      3.65G    0.02479    0.02216    0.00217        118        640: 1\n",
      "tensor([0.58970], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.918       0.71      0.812       0.53\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/59      3.65G    0.02484    0.02254   0.002168        176        640: 1\n",
      "tensor([0.79345], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.913      0.737      0.831      0.554\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      50/59      3.65G    0.02487    0.02211   0.002179        130        640: 1\n",
      "tensor([0.60797], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.874      0.709      0.806      0.537\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      51/59      3.65G    0.02445    0.02213   0.002063        178        640: 1\n",
      "tensor([0.77164], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.904      0.705      0.808       0.54\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      52/59      3.65G    0.02433    0.02182   0.002071        148        640: 1\n",
      "tensor([0.62637], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.879      0.751      0.826      0.548\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      53/59      3.65G    0.02424    0.02165   0.002033        115        640: 1\n",
      "tensor([0.59156], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.932       0.71      0.818      0.545\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      54/59      3.65G    0.02419    0.02149   0.002068        124        640: 1\n",
      "tensor([0.53861], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.883      0.726      0.809      0.543\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      55/59      3.65G    0.02391    0.02118    0.00203        163        640: 1\n",
      "tensor([0.62061], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.917      0.714      0.808       0.54\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      56/59      3.65G    0.02385    0.02147   0.001986        200        640: 1\n",
      "tensor([0.68435], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.946      0.706       0.82      0.548\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      57/59      3.65G    0.02387    0.02138   0.002015        141        640: 1\n",
      "tensor([0.60717], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.874      0.732      0.815      0.544\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      58/59      3.65G    0.02396    0.02138   0.002076        146        640: 1\n",
      "tensor([0.60814], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.929      0.703      0.813      0.545\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      59/59      3.65G    0.02376    0.02121   0.001993        168        640: 1\n",
      "tensor([0.62765], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.872      0.739      0.819       0.55\n",
      "\n",
      "60 epochs completed in 0.612 hours.\n",
      "Optimizer stripped from runs/train/exp45/weights/last.pt, 14.3MB\n",
      "Optimizer stripped from runs/train/exp45/weights/best.pt, 14.3MB\n",
      "\n",
      "Validating runs/train/exp45/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.912       0.74      0.831      0.555\n",
      "                   Car       1048       4012      0.928      0.862      0.938      0.711\n",
      "                   Van       1048        431      0.908      0.872      0.924      0.694\n",
      "                 Truck       1048        166      0.948      0.874      0.948       0.71\n",
      "                  Tram       1048         56      0.901       0.81      0.896       0.63\n",
      "            Pedestrian       1048        618      0.828      0.633      0.747      0.377\n",
      "        Person_sitting       1048         20          1      0.591       0.68      0.394\n",
      "               Cyclist       1048        234      0.918      0.623      0.755      0.429\n",
      "                  Misc       1048        138      0.865      0.652      0.759      0.493\n",
      "Results saved to \u001b[1mruns/train/exp45\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/7dc450e2525744e0bb96e7313129a666\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_f1                         : 0.8939017025650063\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_false_positives            : 267.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_mAP@.5                     : 0.9381424763190963\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_mAP@.5:.95                 : 0.7112789487606423\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_precision                  : 0.9283241907623236\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_recall                     : 0.8619407293634611\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_support                    : 4012\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_true_positives             : 3458.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_f1                     : 0.742365968569673\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_false_positives        : 13.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_mAP@.5                 : 0.7548870442630661\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_mAP@.5:.95             : 0.42904202731043045\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_precision              : 0.9181360826850836\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_recall                 : 0.6230817453039675\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_support                : 234\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_true_positives         : 146.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_f1                        : 0.7438418863132804\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_false_positives           : 14.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_mAP@.5                    : 0.7594125944382439\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_mAP@.5:.95                : 0.49262337280939594\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_precision                 : 0.8653945612278946\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_recall                    : 0.6522301286794041\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_support                   : 138\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_true_positives            : 90.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_f1                  : 0.7174918347904599\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_false_positives     : 81.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_mAP@.5              : 0.7467724234213712\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_mAP@.5:.95          : 0.3766530442425285\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_precision           : 0.8284085340401857\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_recall              : 0.6327695008924782\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_support             : 618\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_true_positives      : 391.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_f1              : 0.7429113967468484\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_false_positives : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_mAP@.5          : 0.6799294750275121\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_mAP@.5:.95      : 0.3941003189872336\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_precision       : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_recall          : 0.5909777519454938\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_support         : 20\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_true_positives  : 12.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_f1                        : 0.8531973136712514\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_false_positives           : 5.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_mAP@.5                    : 0.8959643814609054\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_mAP@.5:.95                : 0.6303135659342024\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_precision                 : 0.9007512005469522\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_recall                    : 0.810412719266886\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_support                   : 56\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_true_positives            : 45.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_f1                       : 0.9091212190559895\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_false_positives          : 8.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_mAP@.5                   : 0.9482152141994888\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_mAP@.5:.95               : 0.7097919935850583\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_precision                : 0.9477154285744388\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_recall                   : 0.8735473855955783\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_support                  : 166\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_true_positives           : 145.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_f1                         : 0.8897036630790177\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_false_positives            : 38.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_mAP@.5                     : 0.9244604758747328\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_mAP@.5:.95                 : 0.6939332589070826\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_precision                  : 0.907718689464675\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_recall                     : 0.8723897911832946\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_support                    : 431\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_true_positives             : 376.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [1566]                    : (0.5570012331008911, 1.6838160753250122)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [120]          : (0.49456472090862785, 0.8308264037925313)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [120]     : (0.29047997767639605, 0.5543939246809582)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [120]        : (0.7412912082527855, 0.9464729762775557)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [120]           : (0.4182080367261387, 0.750904011280842)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [120]           : (0.02376452460885048, 0.035385798662900925)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [120]           : (0.001986270770430565, 0.00501971784979105)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [120]           : (0.021184049546718597, 0.03127323463559151)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [120]             : (0.029363682493567467, 0.04022635519504547)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [120]             : (0.0039024611469358206, 0.014496175572276115)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [120]             : (0.047050707042217255, 0.06963903456926346)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [120]                    : (0.00043, 0.07011450381679389)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [120]                    : (0.00043, 0.009657697201017812)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [120]                    : (0.00043, 0.009657697201017812)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/yolov5/7dc450e2525744e0bb96e7313129a666\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.05000000000000001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/exp45\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (1.88 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     model graph                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Please wait for assets to finish uploading (timeout is 10800 seconds)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m All assets have been sent, waiting for delivery confirmation\n"
     ]
    }
   ],
   "source": [
    "# Fine-tune the fog-pretrained checkpoint on KITTI via train_SI.py.\n",
    "# Plain string (no f-prefix needed): it holds no {} fields; the {command}\n",
    "# in the ! line below is IPython shell interpolation, not an f-string.\n",
    "# Trailing backslashes keep the shell reading it as one command.\n",
    "# NOTE(review): the stored output of this cell is from an earlier\n",
    "# train_Lwf run (VOCKITTI cfg, 50 epochs) — re-run to refresh it.\n",
    "command = \"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_SI.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_kitti.yaml \\\n",
    "--data data/kitti.yaml \\\n",
    "--epochs 60 \\\n",
    "--weights ./runs/train/fog_02/weights/best.pt \\\n",
    "\"\"\"\n",
    "!{command}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5fba3979-1369-4713-8c11-09e561b360e7",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d00b113c-f37f-4571-9c88-c5172a3e7ae8",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f3740431-aa2c-46b3-8373-8335b22f465f",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "773d88dc-92bd-49ee-aab4-79fd705309b6",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_SI: \u001b[0mweights=./runs/train/fog_02/weights/best.pt, cfg=models/yolov5s_kitti.yaml, data=data/kitti.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=60, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=False, SI_pt=None, SI_lambda=10.0\n",
      "Command 'git fetch ultralytics' timed out after 5 seconds\n",
      "YOLOv5 🚀 68de71e8 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/1a04725327e44534ac449937655542f0\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo.Detect                      [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_kitti summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "Transferred 348/349 items from runs/train/fog_02/weights/best.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/train.cache... 4189 image\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/val.cache... 1048 images, 0\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.81 anchors/target, 0.999 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/exp13/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/exp13\u001b[0m\n",
      "Starting training for 60 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/59      3.65G    0.04572    0.05974    0.01434        171        640:  error: RPC failed; curl 16 Error in the HTTP2 framing layer\n",
      "fatal: expected flush after ref listing\n",
      "       0/59      3.65G    0.03486    0.03428   0.006893        128        640: 1\n",
      "tensor([0.87622], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.718       0.41      0.479      0.278\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/59      3.65G    0.03365    0.03056   0.005285        133        640: 1\n",
      "tensor([0.94273], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.746      0.481      0.565      0.317\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/59      3.65G    0.03646    0.03362   0.006825        131        640: 1\n",
      "tensor([1.09027], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.478      0.319      0.316      0.172\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/59      3.65G    0.03822    0.03413   0.006774        108        640: 1\n",
      "tensor([0.90334], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.641      0.416      0.458      0.247\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/59      3.65G     0.0378    0.03293   0.006198        156        640: 1\n",
      "tensor([0.95055], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.768      0.565       0.67      0.353\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/59      3.65G    0.03682    0.03231    0.00579        123        640: 1\n",
      "tensor([0.86239], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.804      0.505      0.609      0.338\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/59      3.65G    0.03617    0.03141   0.005255        174        640: 1\n",
      "tensor([0.99326], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.765      0.569      0.649      0.355\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/59      3.65G    0.03554    0.03061   0.004997        166        640: 1\n",
      "tensor([1.06467], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.771      0.631      0.696      0.406\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/59      3.65G    0.03495    0.03058   0.004649        152        640: 1\n",
      "tensor([0.91153], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.776      0.575      0.646      0.358\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/59      3.65G    0.03434    0.03036   0.004702        136        640: 1\n",
      "tensor([0.88337], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.837      0.627      0.694       0.39\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/59      3.65G    0.03435    0.03013    0.00441        134        640: 1\n",
      "tensor([0.89326], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.811      0.651      0.728      0.415\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/59      3.65G    0.03388    0.02944   0.004292        182        640: 1\n",
      "tensor([0.90920], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.81      0.553      0.658      0.364\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/59      3.65G    0.03358    0.02958   0.004126        128        640: 1\n",
      "tensor([0.75271], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.838      0.644      0.731      0.427\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      13/59      3.65G    0.03301     0.0287   0.003957        112        640: 1\n",
      "tensor([0.83493], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.84      0.632      0.731      0.417\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/59      3.65G    0.03243    0.02878   0.003925        151        640: 1\n",
      "tensor([0.81854], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.839      0.631      0.731      0.417\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/59      3.65G    0.03247    0.02882   0.003836        132        640: 1\n",
      "tensor([0.78036], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.852      0.592       0.69      0.401\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/59      3.65G    0.03196    0.02833   0.003785        131        640: 1\n",
      "tensor([0.76703], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.869      0.626       0.73      0.426\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/59      3.65G    0.03185    0.02816   0.003632        159        640: 1\n",
      "tensor([0.95522], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.805      0.618      0.711      0.397\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/59      3.65G     0.0316    0.02782   0.003589        125        640: 1\n",
      "tensor([0.75048], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.85      0.636       0.73      0.431\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/59      3.65G    0.03141    0.02805   0.003604         88        640: 1\n",
      "tensor([0.71319], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.835      0.653       0.74      0.441\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/59      3.65G    0.03108    0.02716   0.003579        137        640: 1\n",
      "tensor([0.90564], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.869      0.641      0.748      0.448\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/59      3.65G      0.031    0.02766   0.003386        166        640: 1\n",
      "tensor([0.88464], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.822      0.641      0.735      0.439\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/59      3.65G    0.03045    0.02714   0.003326        161        640: 1\n",
      "tensor([0.85353], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.842      0.675      0.766      0.441\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/59      3.65G     0.0305    0.02673   0.003295        118        640: 1\n",
      "tensor([0.74691], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.88      0.628      0.745       0.44\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/59      3.65G    0.02989    0.02665   0.003153        151        640: 1\n",
      "tensor([0.80075], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.815      0.645      0.734      0.434\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/59      3.65G    0.03003    0.02681   0.003122        133        640: 1\n",
      "tensor([0.75468], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.867      0.629      0.725      0.434\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/59      3.65G    0.02966    0.02642   0.003061        154        640: 1\n",
      "tensor([0.89723], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.883      0.651      0.755      0.454\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/59      3.65G    0.02964     0.0266   0.003181        122        640: 1\n",
      "tensor([0.76277], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.856       0.63      0.714      0.424\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/59      3.65G    0.02943    0.02628   0.002958        123        640: 1\n",
      "tensor([0.62618], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.829      0.681      0.761      0.454\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/59      3.65G    0.02917    0.02587   0.002907        127        640: 1\n",
      "tensor([0.67198], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.838      0.603      0.713      0.419\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/59      3.65G    0.02881    0.02496   0.002868        127        640: 1\n",
      "tensor([0.67386], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.85      0.646      0.744      0.447\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/59      3.65G    0.02887    0.02557   0.002945        122        640: 1\n",
      "tensor([0.75309], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.818      0.648      0.716      0.428\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/59      3.65G    0.02869    0.02582   0.002822        146        640: 1\n",
      "tensor([0.78916], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.823      0.658      0.723      0.428\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/59      3.65G    0.02838    0.02506   0.002759        202        640: 1\n",
      "tensor([0.86891], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.867      0.642      0.749      0.456\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/59      3.65G    0.02813    0.02494   0.002798         94        640: 1\n",
      "tensor([0.59314], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.821      0.626       0.72      0.436\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/59      3.65G    0.02804     0.0249   0.002731        152        640: 1\n",
      "tensor([0.79102], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.806      0.681      0.746      0.456\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/59      3.65G    0.02781    0.02466   0.002749        123        640: 1\n",
      "tensor([0.65625], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.845      0.665      0.747      0.455\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/59      3.65G     0.0276    0.02471   0.002674        162        640: 1\n",
      "tensor([0.70542], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.833       0.67      0.744      0.451\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/59      3.65G    0.02763    0.02498   0.002708        161        640: 1\n",
      "tensor([0.70415], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.803      0.685       0.75      0.446\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/59      3.65G    0.02738    0.02466   0.002606        122        640: 1\n",
      "tensor([0.62834], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.866      0.658      0.749      0.449\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/59      3.65G    0.02732    0.02433   0.002663        126        640: 1\n",
      "tensor([0.61479], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.824      0.684      0.747      0.456\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/59      3.65G    0.02695    0.02428   0.002623         90        640: 1\n",
      "tensor([0.58870], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.849      0.675      0.754      0.458\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/59      3.65G    0.02675    0.02399   0.002464        118        640: 1\n",
      "tensor([0.69317], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.892      0.661      0.755      0.462\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/59      3.65G    0.02669    0.02403   0.002351        157        640: 1\n",
      "tensor([0.75474], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.851      0.655      0.743      0.454\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/59      3.65G    0.02652    0.02389   0.002436        104        640: 1\n",
      "tensor([0.50739], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.856      0.664      0.762      0.468\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/59      3.65G    0.02652    0.02412   0.002339        157        640: 1\n",
      "tensor([0.66941], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.848      0.675      0.743      0.457\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/59      3.65G    0.02634    0.02344   0.002267        108        640: 1\n",
      "tensor([0.51836], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.838      0.689      0.759      0.467\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/59      3.65G    0.02616    0.02337   0.002336        159        640: 1\n",
      "tensor([0.68850], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.862      0.659      0.745      0.447\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/59      3.65G    0.02591    0.02329   0.002287        118        640: 1\n",
      "tensor([0.60599], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.823      0.646      0.733      0.444\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/59      3.65G    0.02598     0.0236   0.002329        176        640: 1\n",
      "tensor([0.83606], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.879      0.645      0.748      0.466\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      50/59      3.65G    0.02601    0.02323   0.002327        130        640: 1\n",
      "tensor([0.63736], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.891      0.633      0.762      0.473\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      51/59      3.65G    0.02552    0.02324   0.002165        178        640: 1\n",
      "tensor([0.80347], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.819       0.66      0.748      0.462\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      52/59      3.65G    0.02544     0.0229   0.002223        148        640: 1\n",
      "tensor([0.63847], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.886      0.624       0.74      0.468\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      53/59      3.65G    0.02537    0.02279   0.002156        115        640: 1\n",
      "tensor([0.60074], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.864      0.655      0.752      0.471\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      54/59      3.65G     0.0253    0.02259   0.002171        124        640: 1\n",
      "tensor([0.58438], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.846       0.63      0.729      0.467\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      55/59      3.65G    0.02501    0.02228   0.002173        163        640: 1\n",
      "tensor([0.63933], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.872      0.625      0.734      0.464\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      56/59      3.65G    0.02497    0.02257   0.002132        200        640: 1\n",
      "tensor([0.72647], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.871       0.63      0.727      0.461\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      57/59      3.65G    0.02495    0.02252   0.002134        141        640: 1\n",
      "tensor([0.63790], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.846      0.662      0.749      0.469\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      58/59      3.65G    0.02506    0.02244   0.002185        146        640: 1\n",
      "tensor([0.63203], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.849      0.663      0.746      0.463\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      59/59      3.65G    0.02487    0.02229   0.002135        168        640: 1\n",
      "tensor([0.65860], device='cuda:0', grad_fn=<MulBackward0>) \n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.87      0.641      0.744      0.469\n",
      "\n",
      "60 epochs completed in 0.748 hours.\n",
      "Optimizer stripped from runs/train/exp13/weights/last.pt, 14.3MB\n",
      "Optimizer stripped from runs/train/exp13/weights/best.pt, 14.3MB\n",
      "\n",
      "Validating runs/train/exp13/weights/best.pt...\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.892      0.633      0.762      0.473\n",
      "                   Car       1048       4012      0.951      0.795      0.912      0.658\n",
      "                   Van       1048        431      0.934      0.752      0.855      0.596\n",
      "                 Truck       1048        166      0.929      0.783      0.862      0.613\n",
      "                  Tram       1048         56      0.905      0.677      0.834      0.501\n",
      "            Pedestrian       1048        618      0.886       0.55      0.693       0.34\n",
      "        Person_sitting       1048         20      0.774        0.4      0.587      0.323\n",
      "               Cyclist       1048        234      0.892      0.564      0.682      0.355\n",
      "                  Misc       1048        138      0.867      0.543      0.672      0.402\n",
      "Results saved to \u001b[1mruns/train/exp13\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Comet.ml Experiment Summary\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m ---------------------------------------------------------------------------------------\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Data:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     display_summary_level : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                  : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     url                   : \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/1a04725327e44534ac449937655542f0\u001b[0m\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Metrics [count] (min, max):\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_f1                         : 0.8660974908108733\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_false_positives            : 165.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_mAP@.5                     : 0.9117284674992724\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_mAP@.5:.95                 : 0.6576803056678355\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_precision                  : 0.9508266538588905\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_recall                     : 0.7952334057308607\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_support                    : 4012\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Car_true_positives             : 3190.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_f1                     : 0.6912407358609662\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_false_positives        : 16.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_mAP@.5                 : 0.6815857573560781\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_mAP@.5:.95             : 0.3552291760109441\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_precision              : 0.892362577690763\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_recall                 : 0.5641025641025641\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_support                : 234\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Cyclist_true_positives         : 132.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_f1                        : 0.6682555007058876\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_false_positives           : 11.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_mAP@.5                    : 0.672030101308887\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_mAP@.5:.95                : 0.4017586531504732\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_precision                 : 0.8674025595732219\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_recall                    : 0.5434782608695652\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_support                   : 138\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Misc_true_positives            : 75.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_f1                  : 0.6788571755200804\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_false_positives     : 44.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_mAP@.5              : 0.6927140296025465\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_mAP@.5:.95          : 0.3399452351575302\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_precision           : 0.8861471531003008\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_recall              : 0.5501618122977346\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_support             : 618\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Pedestrian_true_positives      : 340.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_f1              : 0.5273770184591521\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_false_positives : 2.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_mAP@.5          : 0.5869034985782577\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_mAP@.5:.95      : 0.32333013359319396\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_precision       : 0.7737821888359524\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_recall          : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_support         : 20\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Person_sitting_true_positives  : 8.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_f1                        : 0.7745855903103145\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_false_positives           : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_mAP@.5                    : 0.83413284027159\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_mAP@.5:.95                : 0.5006366075206465\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_precision                 : 0.9045903993209022\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_recall                    : 0.6772529804937213\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_support                   : 56\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Tram_true_positives            : 38.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_f1                       : 0.8492929834499765\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_false_positives          : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_mAP@.5                   : 0.862387720535793\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_mAP@.5:.95               : 0.6126761590785548\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_precision                : 0.9285194901918914\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_recall                   : 0.7825236621290401\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_support                  : 166\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Truck_true_positives           : 130.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_f1                         : 0.8331685398968889\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_false_positives            : 23.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_mAP@.5                     : 0.8552724580118847\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_mAP@.5:.95                 : 0.5958337717607929\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_precision                  : 0.9343805250170295\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_recall                     : 0.7517401392111369\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_support                    : 431\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Van_true_positives             : 324.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     loss [1566]                    : (0.5829600095748901, 2.199005603790283)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5 [120]          : (0.31571034535194276, 0.7658164070223539)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/mAP_0.5:0.95 [120]     : (0.17245698262411316, 0.473132572564258)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/precision [120]        : (0.47842081204701153, 0.8915991421410794)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     metrics/recall [120]           : (0.31850764134747167, 0.6885003658187612)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/box_loss [120]           : (0.02486530877649784, 0.03821562975645065)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/cls_loss [120]           : (0.0021321612875908613, 0.00689319334924221)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     train/obj_loss [120]           : (0.022277528420090675, 0.03428387641906738)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/box_loss [120]             : (0.03244195133447647, 0.05008367449045181)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/cls_loss [120]             : (0.004948457237333059, 0.016532761976122856)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val/obj_loss [120]             : (0.05033759027719498, 0.07524918019771576)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr0 [120]                    : (0.00043, 0.07011450381679389)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr1 [120]                    : (0.00043, 0.009657697201017812)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     x/lr2 [120]                    : (0.00043, 0.009657697201017812)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Others:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Name                        : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     Run Path                    : nagasaki-soyorin/yolov5/1a04725327e44534ac449937655542f0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_batch_metrics     : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_confusion_matrix  : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_log_per_class_metrics : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_max_image_uploads     : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_mode                  : online\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     comet_model_name            : yolov5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hasNestedParams             : True\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Parameters:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_enable           : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_lambda           : 10.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     SI_pt               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     anchor_t            : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     artifact_alias      : latest\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     batch_size          : 16\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     box                 : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     bucket              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cache               : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls                 : 0.05000000000000001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cls_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     copy_paste          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     cos_lr              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     degrees             : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     device              : \n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     entity              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     evolve_population   : data/hyps\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_lambda          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ewc_pt              : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     exist_ok            : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fl_gamma            : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     fliplr              : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     flipud              : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     freeze              : [0]\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_h               : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_s               : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hsv_v               : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|anchor_t        : 4.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|box             : 0.05\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls             : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|cls_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|copy_paste      : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|degrees         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fl_gamma        : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|fliplr          : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|flipud          : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_h           : 0.015\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_s           : 0.7\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|hsv_v           : 0.4\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|iou_t           : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lr0             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|lrf             : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mixup           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|momentum        : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|mosaic          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj             : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|obj_pw          : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|perspective     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|scale           : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|shear           : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|translate       : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_bias_lr  : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_epochs   : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|warmup_momentum : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     hyp|weight_decay    : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     image_weights       : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     imgsz               : 640\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     iou_t               : 0.2\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     label_smoothing     : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     local_rank          : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lr0                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     lrf                 : 0.01\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mixup               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     momentum            : 0.937\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     mosaic              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     multi_scale         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     name                : exp\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_console      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     ndjson_file         : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noautoanchor        : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noplots             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     nosave              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     noval               : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj                 : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     obj_pw              : 1.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     optimizer           : SGD\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     patience            : 100\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     perspective         : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     project             : runs/train\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     quad                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     rect                : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume              : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     resume_evolve       : None\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_dir            : runs/train/exp13\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     save_period         : -1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     scale               : 0.5\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     seed                : 0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     shear               : 0.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     single_cls          : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     sync_bn             : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     translate           : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     upload_dataset      : False\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_conf_threshold  : 0.001\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     val_iou_threshold   : 0.6\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_bias_lr      : 0.1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_epochs       : 3.0\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     warmup_momentum     : 0.8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     weight_decay        : 0.0005\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     workers             : 8\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m   Uploads:\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     asset                        : 13 (1.91 MB)\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-environment-definition : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-info                   : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     conda-specification          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     confusion-matrix             : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     environment details          : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     git metadata                 : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     images                       : 106\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     installed packages           : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     model graph                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m     os packages                  : 1\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m \n"
     ]
    }
   ],
   "source": [
    "# Fine-tune YOLOv5s on KITTI with SI (Synaptic Intelligence) regularization,\n",
    "# starting from the fog_02 checkpoint; COMET_LOG_PER_CLASS_METRICS exports\n",
    "# per-class P/R/mAP/F1 to Comet (see the per-class summary in the output).\n",
    "# Alternative starting weights: ./runs/train/exp3/weights/best.pt\n",
    "command = \"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_SI.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_kitti.yaml \\\n",
    "--data data/kitti.yaml \\\n",
    "--epochs 60 \\\n",
    "--weights ./runs/train/fog_02/weights/best.pt\n",
    "\"\"\"\n",
    "# IPython shell escape: runs the assembled command in a subshell.\n",
    "!{command}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "1097f088-fb1e-4fb9-94f9-3aa3fdb7ddf5",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mtrain_SI: \u001b[0mweights=./runs/train/fog_02/weights/best.pt, cfg=models/yolov5s_kitti.yaml, data=data/kitti.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=60, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, evolve_population=data/hyps, resume_evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=1, artifact_alias=latest, ndjson_console=False, ndjson_file=False, ewc_pt=None, ewc_lambda=0.0, SI_enable=True, SI_pt=./runs/train/fog_02/weights/si.pt, SI_lambda=0.0055\n",
      "Command 'git fetch ultralytics' timed out after 5 seconds\n",
      "YOLOv5 🚀 68de71e8 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "\u001b[1;38;5;39mCOMET INFO:\u001b[0m Experiment is live on comet.com \u001b[38;5;39mhttps://www.comet.com/nagasaki-soyorin/yolov5/ad1535af955c4556ae8643daa8d247df\u001b[0m\n",
      "\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1     35061  models.yolo.Detect                      [8, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      "YOLOv5s_kitti summary: 214 layers, 7041205 parameters, 7041205 gradients, 16.0 GFLOPs\n",
      "\n",
      "Transferred 348/349 items from runs/train/fog_02/weights/best.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0m1 validation error for InitSchema\n",
      "size\n",
      "  Field required [type=missing, input_value={'height': 640, 'width': ...'mask_interpolation': 0}, input_type=dict]\n",
      "    For further information visit https://errors.pydantic.dev/2.10/v/missing\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/train.cache... 4189 image\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/val.cache... 1048 images, 0\u001b[0m\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.81 anchors/target, 0.999 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train/exp44/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 8 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/exp44\u001b[0m\n",
      "Starting training for 60 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       0/59      3.61G     0.0349    0.03423   0.006864        128        640: 1\n",
      "tensor([0.87958], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00012], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.703      0.401      0.478      0.272\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       1/59      3.61G    0.03348    0.03037    0.00534        129        640:  fatal: unable to access 'https://github.com/ultralytics/yolov5/': Failed to connect to github.com port 443 after 130154 ms: Connection timed out\n",
      "       1/59      3.61G    0.03385     0.0306   0.005308        133        640: 1\n",
      "tensor([0.95741], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00017], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.728      0.496      0.566      0.321\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       2/59      3.61G    0.03642     0.0331   0.006599        131        640: 1\n",
      "tensor([1.07104], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00023], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.597      0.274       0.31      0.175\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       3/59      3.61G    0.03869    0.03491   0.007268        108        640: 1\n",
      "tensor([0.91750], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00055], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.697      0.428      0.479       0.25\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       4/59      3.61G    0.03797    0.03303   0.006044        156        640: 1\n",
      "tensor([0.97952], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00082], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.718      0.474      0.547      0.295\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       5/59      3.61G    0.03701    0.03232   0.005804        123        640: 1\n",
      "tensor([0.89186], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00097], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.71      0.482      0.557      0.321\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       6/59      3.61G    0.03602    0.03123   0.005141        174        640: 1\n",
      "tensor([1.02895], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00108], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.76      0.574      0.649      0.348\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       7/59      3.61G     0.0355     0.0306   0.004969        166        640: 1\n",
      "tensor([1.07434], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00117], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.785      0.619      0.685      0.394\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       8/59      3.61G    0.03473    0.03048   0.004639        152        640: 1\n",
      "tensor([0.95018], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00125], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.719      0.528      0.591      0.324\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "       9/59      3.61G    0.03466    0.03033   0.004598        136        640: 1\n",
      "tensor([0.89252], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00133], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.769      0.642      0.701      0.406\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      10/59      3.61G    0.03425    0.03005   0.004492        134        640: 1\n",
      "tensor([0.82773], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00140], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.836      0.621      0.715      0.415\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      11/59      3.61G    0.03346     0.0292   0.004232        182        640: 1\n",
      "tensor([0.94958], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00145], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.763      0.602      0.678      0.389\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      12/59      3.61G    0.03345    0.02933   0.004072        128        640: 1\n",
      "tensor([0.73796], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00148], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.856      0.603      0.715      0.411\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      14/59      3.61G    0.03261    0.02872    0.00386        151        640: 1\n",
      "tensor([0.87213], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00154], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.794      0.648       0.71      0.405\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      15/59      3.61G    0.03216    0.02861    0.00378        132        640: 1\n",
      "tensor([0.79417], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00156], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.868      0.628      0.726      0.421\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      16/59      3.61G    0.03214    0.02823   0.003707        131        640: 1\n",
      "tensor([0.79223], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00158], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.844      0.599      0.709      0.404\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      17/59      3.61G    0.03199    0.02814   0.003711        159        640: 1\n",
      "tensor([0.91858], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00161], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.848      0.623      0.714      0.426\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      18/59      3.61G    0.03163    0.02779   0.003592        125        640: 1\n",
      "tensor([0.70429], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00162], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.828      0.581      0.684      0.388\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      19/59      3.61G    0.03148    0.02798   0.003531         88        640: 1\n",
      "tensor([0.68498], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00164], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.862      0.623      0.725      0.428\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      20/59      3.61G    0.03102     0.0271   0.003498        137        640: 1\n",
      "tensor([0.89901], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00165], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.834      0.603      0.715      0.422\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      21/59      3.61G    0.03063    0.02758   0.003349        166        640: 1\n",
      "tensor([0.84804], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00167], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.864      0.638      0.738      0.438\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      22/59      3.61G    0.03033    0.02709   0.003228        161        640: 1\n",
      "tensor([0.84144], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00168], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.839      0.656      0.752      0.444\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      23/59      3.61G     0.0305    0.02676   0.003265        118        640: 1\n",
      "tensor([0.76076], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00168], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.863      0.667       0.75      0.446\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      24/59      3.61G    0.03017    0.02671   0.003206        151        640: 1\n",
      "tensor([0.85880], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00169], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.852      0.586      0.691      0.408\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      25/59      3.61G    0.02989    0.02681   0.003166        133        640: 1\n",
      "tensor([0.74743], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00169], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.887       0.63      0.733      0.429\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      26/59      3.61G    0.02949    0.02635   0.003036        154        640: 1\n",
      "tensor([0.89286], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00170], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.86      0.624      0.734       0.43\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      27/59      3.61G    0.02975    0.02659    0.00319        122        640: 1\n",
      "tensor([0.74569], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00170], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.865      0.614       0.71      0.426\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      28/59      3.61G    0.02909    0.02623   0.002931        123        640: 1\n",
      "tensor([0.60624], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00170], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.871      0.629      0.737      0.441\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      29/59      3.61G    0.02928      0.026   0.002952        127        640: 1\n",
      "tensor([0.63909], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00171], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.852       0.63      0.728      0.429\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      30/59      3.61G    0.02878    0.02499   0.002916        127        640: 1\n",
      "tensor([0.67454], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00171], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.878      0.616      0.729      0.429\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      31/59      3.61G    0.02872    0.02554   0.002907        122        640: 1\n",
      "tensor([0.78659], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00171], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.81      0.635       0.72      0.435\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      32/59      3.61G    0.02874    0.02576    0.00285        146        640: 1\n",
      "tensor([0.79162], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00171], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.883      0.618      0.715      0.423\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      33/59      3.61G    0.02848    0.02516    0.00279        202        640: 1\n",
      "tensor([0.89895], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00170], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.882      0.619      0.731      0.433\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      34/59      3.61G    0.02819      0.025   0.002799         94        640: 1\n",
      "tensor([0.59743], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00170], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.87      0.617      0.729       0.44\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      35/59      3.61G    0.02808    0.02494   0.002709        152        640: 1\n",
      "tensor([0.80035], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00170], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.872      0.601      0.722      0.444\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      36/59      3.61G    0.02781     0.0247   0.002739        123        640: 1\n",
      "tensor([0.64287], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00170], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.867      0.622      0.729      0.441\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      37/59      3.61G    0.02769    0.02481    0.00276        162        640: 1\n",
      "tensor([0.70573], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00169], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.877      0.631      0.717       0.44\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      38/59      3.61G    0.02765     0.0249    0.00271        161        640: 1\n",
      "tensor([0.71438], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00169], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.846      0.637      0.735       0.46\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      39/59      3.61G    0.02733    0.02462   0.002589        122        640: 1\n",
      "tensor([0.62401], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00169], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.892      0.639      0.747      0.456\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      40/59      3.61G    0.02729    0.02425   0.002625        126        640: 1\n",
      "tensor([0.62437], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00168], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.902      0.628      0.731      0.439\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      41/59      3.61G    0.02697    0.02428   0.002604         90        640: 1\n",
      "tensor([0.59091], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00168], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.885      0.628      0.731      0.451\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      42/59      3.61G    0.02676    0.02401   0.002479        118        640: 1\n",
      "tensor([0.71229], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00167], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.897      0.626      0.729      0.449\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      43/59      3.61G    0.02669    0.02402   0.002336        157        640: 1\n",
      "tensor([0.75607], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00167], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.831      0.632      0.721      0.446\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      44/59      3.61G    0.02661    0.02392    0.00245        104        640: 1\n",
      "tensor([0.51744], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00167], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.905      0.612      0.731      0.453\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      45/59      3.61G    0.02656    0.02416   0.002315        157        640: 1\n",
      "tensor([0.66923], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00166], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.879      0.605      0.713      0.447\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      46/59      3.61G    0.02632    0.02341   0.002291        108        640: 1\n",
      "tensor([0.51867], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00165], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.881      0.643      0.739      0.459\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      47/59      3.61G    0.02622    0.02339   0.002337        159        640: 1\n",
      "tensor([0.69055], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00165], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675       0.92      0.629      0.739      0.458\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      48/59      3.61G    0.02593    0.02335    0.00233        118        640: 1\n",
      "tensor([0.62281], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00165], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.873      0.641      0.737       0.46\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      49/59      3.61G    0.02595    0.02357   0.002316        176        640: 1\n",
      "tensor([0.83469], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00164], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.879      0.633      0.738      0.466\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      50/59      3.61G    0.02601     0.0232   0.002326        130        640: 1\n",
      "tensor([0.62833], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00164], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.839      0.655      0.744      0.466\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      51/59      3.61G    0.02557    0.02325   0.002166        178        640: 1\n",
      "tensor([0.79447], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00163], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.876      0.625      0.733      0.462\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      52/59      3.61G     0.0255    0.02287   0.002201        148        640: 1\n",
      "tensor([0.63664], device='cuda:0', grad_fn=<AddBackward0>) tensor([0.00163], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       1048       5675      0.871       0.63      0.732      0.461\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\n",
      "      53/59      3.61G    0.02552    0.02287   0.002177        170        640:  ^C\n",
      "      53/59      3.61G    0.02551    0.02286   0.002175        172        640:  \n"
     ]
    }
   ],
   "source": [
    "# Incremental training with Synaptic Intelligence (SI) regularization,\n",
    "# resuming from the fog_02 checkpoint and its accumulated SI state (si.pt).\n",
    "# Plain triple-quoted string: no interpolation, so no f-prefix needed.\n",
    "command = \"\"\"\n",
    "env COMET_LOG_PER_CLASS_METRICS=true python train_SI.py \\\n",
    "--img 640 \\\n",
    "--bbox_interval 1 \\\n",
    "--cfg models/yolov5s_kitti.yaml \\\n",
    "--data data/kitti.yaml \\\n",
    "--epochs 60 \\\n",
    "--weights ./runs/train/fog_02/weights/best.pt \\\n",
    "--SI_enable \\\n",
    "--SI_pt ./runs/train/fog_02/weights/si.pt \\\n",
    "--SI_lambda 5.5e-3 \\\n",
    "\"\"\"\n",
    "!{command}\n",
    "# Alternative starting checkpoint kept for reference:\n",
    "# --weights ./runs/train/exp3/weights/best.pt \\\n",
    "# TODO: revisit this run later (translated from original Chinese note)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c70a7877-ca05-491c-b881-1b032bd4a52c",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "654d8cf1-7f31-4680-a55e-e22e5fd6ff78",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "166df7fe-9326-4a17-9811-781e11475661",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "6cbcf331-1afe-46c2-8355-66419e3707ca",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test set updated successfully!\n"
     ]
    }
   ],
   "source": [
    "# 然后是0.9雾测试集\n",
    "update_testsets = f\" \\\n",
    "rm ../datasets/kitti/images/test/* &&\\\n",
    "cp /root/autodl-tmp/datasets/fogged/fogged_strength1.0/* ../datasets/kitti/images/test/ && \\\n",
    "echo 'Test set updated successfully!' \\\n",
    "\" \n",
    "!{update_testsets}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "9d64c049-3ffb-4784-a590-4a3f27f8861c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti.yaml, weights=['runs/train/exp13/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 68de71e8 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test.cache... 2244 images,\u001b[0m\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.826      0.601      0.698      0.425\n",
      "                   Car       2244       8711      0.909      0.716      0.838      0.596\n",
      "                   Van       2244        861      0.841      0.633      0.717      0.494\n",
      "                 Truck       2244        333      0.853      0.594      0.697      0.424\n",
      "                  Tram       2244        138      0.732      0.634      0.726      0.378\n",
      "            Pedestrian       2244       1286      0.822      0.594      0.688      0.377\n",
      "        Person_sitting       2244         89      0.795      0.479        0.6      0.332\n",
      "               Cyclist       2244        496      0.835      0.484      0.583      0.338\n",
      "                  Misc       2244        284      0.822      0.673      0.734      0.462\n",
      "Speed: 0.1ms pre-process, 0.8ms inference, 1.0ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp58\u001b[0m\n",
      "Test set val successfully!\n"
     ]
    }
   ],
   "source": [
    "# Fog-free training set: evaluate the baseline model (exp13) on the KITTI test split.\n",
    "model = 'runs/train/exp13/weights/last.pt'\n",
    "\n",
    "# Trailing backslashes inside the f-string join these lines into one shell command.\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "425a8e34-1319-441e-b1f7-255138dc3ff4",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test set updated successfully!\n"
     ]
    }
   ],
   "source": [
    "# Restore the KITTI test split to the original (fog-free) test images,\n",
    "# used to measure EWC incremental training on the first, fog-free dataset.\n",
    "\n",
    "# NOTE: the 'rm .../test/*' wipes the current test images before copying.\n",
    "update_testsets = \" \\\n",
    "rm ../datasets/kitti/images/test/* &&\\\n",
    "cp /root/autodl-tmp/testing/image_2/* ../datasets/kitti/images/test/ && \\\n",
    "echo 'Test set updated successfully!' \\\n",
    "\" \n",
    "!{update_testsets}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "3601fd66-2803-4e67-8348-dda810b208eb",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti.yaml, weights=['runs/train/exp13/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 68de71e8 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test... 2244 images, 0 bac\u001b[0m\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mNew cache created: /root/autodl-tmp/datasets/kitti/labels/test.cache\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.863       0.65      0.758      0.478\n",
      "                   Car       2244       8711      0.931       0.79      0.906      0.652\n",
      "                   Van       2244        861      0.881      0.695      0.796      0.547\n",
      "                 Truck       2244        333      0.931      0.809       0.89      0.657\n",
      "                  Tram       2244        138      0.943      0.681      0.843      0.474\n",
      "            Pedestrian       2244       1286       0.83      0.596      0.691      0.365\n",
      "        Person_sitting       2244         89      0.645      0.562      0.626      0.336\n",
      "               Cyclist       2244        496      0.906      0.467      0.619      0.356\n",
      "                  Misc       2244        284      0.838      0.602      0.695       0.44\n",
      "Speed: 0.0ms pre-process, 0.9ms inference, 0.7ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp59\u001b[0m\n",
      "Test set val successfully!\n"
     ]
    }
   ],
   "source": [
    "# Fog-free training set: re-evaluate the baseline model (exp13) on the\n",
    "# restored fog-free KITTI test split.\n",
    "model = 'runs/train/exp13/weights/last.pt'\n",
    "\n",
    "# Trailing backslashes inside the f-string join these lines into one shell command.\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "cb380383-2d2c-4314-b689-f3dd080bf596",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mval: \u001b[0mdata=data/kitti.yaml, weights=['runs/train/fog_1.0/weights/last.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=test, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val, name=exp, exist_ok=False, half=False, dnn=False\n",
      "YOLOv5 🚀 68de71e8 Python-3.10.8 torch-2.1.2+cu118 CUDA:0 (NVIDIA vGPU-32GB, 32260MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s_kitti summary: 157 layers, 7031701 parameters, 0 gradients, 15.8 GFLOPs\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mScanning /root/autodl-tmp/datasets/kitti/labels/test... 2244 images, 0 bac\u001b[0m\n",
      "\u001b[34m\u001b[1mtest: \u001b[0mNew cache created: /root/autodl-tmp/datasets/kitti/labels/test.cache\n",
      "                 Class     Images  Instances          P          R      mAP50   \n",
      "                   all       2244      12198      0.783      0.535      0.629      0.376\n",
      "                   Car       2244       8711      0.891      0.705      0.821      0.571\n",
      "                   Van       2244        861      0.772      0.586      0.648       0.44\n",
      "                 Truck       2244        333      0.814      0.535      0.623      0.388\n",
      "                  Tram       2244        138      0.711      0.493      0.611      0.336\n",
      "            Pedestrian       2244       1286      0.755       0.57      0.642      0.339\n",
      "        Person_sitting       2244         89      0.759      0.388      0.516      0.265\n",
      "               Cyclist       2244        496      0.865      0.423      0.531      0.288\n",
      "                  Misc       2244        284      0.697      0.577       0.64      0.381\n",
      "Speed: 0.0ms pre-process, 0.7ms inference, 1.0ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val/exp60\u001b[0m\n",
      "Test set val successfully!\n"
     ]
    }
   ],
   "source": [
    "# Model trained on the fog-1.0 training set, evaluated on the current KITTI test split.\n",
    "model = 'runs/train/fog_1.0/weights/last.pt'\n",
    "\n",
    "# Trailing backslashes inside the f-string join these lines into one shell command.\n",
    "val_command = f\" \\\n",
    "python val.py \\\n",
    "--data data/kitti.yaml \\\n",
    "--weights {model} \\\n",
    "--task test &&\\\n",
    "echo 'Test set val successfully!' \\\n",
    "\" \n",
    "!{val_command}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "45bf81ab-f517-41b8-a02d-e22ad8ae19f1",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
