{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "4f9c2d75",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/opt/conda/lib/python3.8/site-packages/torch/distributed/launch.py:178: FutureWarning: The module torch.distributed.launch is deprecated\n",
      "and will be removed in future. Use torchrun.\n",
      "Note that --use_env is set by default in torchrun.\n",
      "If your script expects `--local_rank` argument to be set, please\n",
      "change it to read from `os.environ['LOCAL_RANK']` instead. See \n",
      "https://pytorch.org/docs/stable/distributed.html#launch-utility for \n",
      "further instructions\n",
      "\n",
      "  warnings.warn(\n",
      "WARNING:torch.distributed.run:\n",
      "*****************************************\n",
      "Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. \n",
      "*****************************************\n",
      "\u001b[34m\u001b[1mcaption/train: \u001b[0mweights=, cfg=models/caption/gyolo.yaml, data=data/coco.yaml, hyp=data/hyps/hyp.scratch-cap.yaml, epochs=60, batch_size=70, imgsz=640, rect=False, resume=False, nosave=False, noval=True, noautoanchor=False, noplots=True, evolve=None, bucket=, cache=None, image_weights=False, device=0,1,2,3,4,5,6,7,8,9, multi_scale=False, single_cls=False, optimizer=AdamW, sync_bn=False, workers=8, project=runs/train-cap, name=gyolo, exist_ok=False, quad=False, cos_lr=False, flat_cos_lr=True, fixed_lr=False, poly_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=1, seed=0, local_rank=0, close_mosaic=2, mask_ratio=4, no_overlap=True\n",
      "YOLO 🚀 2024-11-3 Python-3.8.12 torch-1.11.0a0+b6df043 CUDA:0 (NVIDIA GeForce RTX 3090, 24253MiB)\n",
      "                                                       CUDA:1 (NVIDIA GeForce RTX 3090, 24253MiB)\n",
      "                                                       CUDA:2 (NVIDIA GeForce RTX 3090, 24253MiB)\n",
      "                                                       CUDA:3 (NVIDIA GeForce RTX 3090, 24253MiB)\n",
      "                                                       CUDA:4 (NVIDIA GeForce RTX 3090, 24253MiB)\n",
      "                                                       CUDA:5 (NVIDIA GeForce RTX 3090, 24253MiB)\n",
      "                                                       CUDA:6 (NVIDIA GeForce RTX 3090, 24253MiB)\n",
      "                                                       CUDA:7 (NVIDIA GeForce RTX 3090, 24253MiB)\n",
      "                                                       CUDA:8 (NVIDIA GeForce RTX 3090, 24253MiB)\n",
      "                                                       CUDA:9 (NVIDIA GeForce RTX 3090, 24253MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.004, lrf=0.125, momentum=0.937, weight_decay=0.005, warmup_epochs=0.6, warmup_momentum=0.8, warmup_bias_lr=0.01, box=7.5, cls=0.5, cls_pw=1.0, obj=0.7, obj_pw=1.0, dfl=1.5, iou_t=0.2, anchor_t=5.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.9, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.15, copy_paste=0.3, all_tasks={'vision': ['detection', 'semantic'], 'language': ['caption']}, caption_lr=0.0001, caption_weight_decay=0.01, caption_lr_drop=20, caption_clip_max_norm=1.0, caption_max_length=20, caption_loss_gain=0.1, caption_tokenizer=custom, caption_vocab_path=./data/vocab.txt, caption_vocab_size=10202, caption_pad_idx=0, caption_bos_idx=2, caption_eos_idx=3, caption_use_gri_feat=True, caption_use_reg_feat=True, caption_grid_feat_dim=512, caption_beam_size=5, caption_beam_len=20, caption_dropout=0.2, caption_attn_dropout=0.2, caption_max_len=128, caption_d_model=256, caption_n_heads=8, caption_grid_net={'n_memories': 1, 'n_layers': 3}, caption_cap_generator={'decoder_name': 'parallel', 'n_layers': 3}\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-cap', view at http://localhost:6006/\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1         0  models.common.Silence                   []                            \n",
      "  1                -1  1      1856  models.common.Conv                      [3, 64, 3, 2]                 \n",
      "  2                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  3                -1  1    212864  models.common.RepNCSPELAN4              [128, 256, 128, 64, 1]        \n",
      "  4                -1  1    164352  models.common.ADown                     [256, 256]                    \n",
      "  5                -1  1    847616  models.common.RepNCSPELAN4              [256, 512, 256, 128, 1]       \n",
      "  6                -1  1    656384  models.common.ADown                     [512, 512]                    \n",
      "  7                -1  1   2857472  models.common.RepNCSPELAN4              [512, 512, 512, 256, 1]       \n",
      "  8                -1  1    656384  models.common.ADown                     [512, 512]                    \n",
      "  9                -1  1   2857472  models.common.RepNCSPELAN4              [512, 512, 512, 256, 1]       \n",
      " 10                -1  1    656896  models.common.SPPELAN                   [512, 512, 256]               \n",
      " 11                -1  1    990976  models.common.PSA                       [512, 512]                    \n",
      " 12                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 13           [-1, 7]  1         0  models.common.Concat                    [1]                           \n",
      " 14                -1  1   3119616  models.common.RepNCSPELAN4              [1024, 512, 512, 256, 1]      \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 5]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1    912640  models.common.RepNCSPELAN4B             [1024, 256, 256, 128, 1]      \n",
      " 18                -1  1    164352  models.common.ADown                     [256, 256]                    \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1   2988544  models.common.RepNCSPELAN4B             [768, 512, 512, 256, 1]       \n",
      " 21                -1  1    656384  models.common.ADown                     [512, 512]                    \n",
      " 22          [-1, 11]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   3119616  models.common.RepNCSPELAN4B             [1024, 512, 512, 256, 1]      \n",
      " 24                 5  1    131328  models.common.CBLinear                  [512, [256]]                  \n",
      " 25                 7  1    393984  models.common.CBLinear                  [512, [256, 512]]             \n",
      " 26                 9  1    656640  models.common.CBLinear                  [512, [256, 512, 512]]        \n",
      " 27                 0  1      1856  models.common.Conv                      [3, 64, 3, 2]                 \n",
      " 28                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      " 29                -1  1    212864  models.common.RepNCSPELAN4              [128, 256, 128, 64, 1]        \n",
      " 30                -1  1    164352  models.common.ADown                     [256, 256]                    \n",
      " 31  [24, 25, 26, -1]  1         0  models.common.CBFuse                    [[0, 0, 0]]                   \n",
      " 32                -1  1    847616  models.common.RepNCSPELAN4              [256, 512, 256, 128, 1]       \n",
      " 33                -1  1    656384  models.common.ADown                     [512, 512]                    \n",
      " 34      [25, 26, -1]  1         0  models.common.CBFuse                    [[1, 1]]                      \n",
      " 35                -1  1   2857472  models.common.RepNCSPELAN4              [512, 512, 512, 256, 1]       \n",
      " 36                -1  1    656384  models.common.ADown                     [512, 512]                    \n",
      " 37          [26, -1]  1         0  models.common.CBFuse                    [[2]]                         \n",
      " 38                -1  1   2857472  models.common.RepNCSPELAN4              [512, 512, 512, 256, 1]       \n",
      " 39                32  1   1069568  models.common.RepNCSPELAN4              [512, 512, 256, 128, 2]       \n",
      " 40                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 41                -1  1      5120  models.common.DWConv                    [512, 256, 3, 1]              \n",
      " 42                -1  1     66048  models.common.Conv                      [256, 256, 1, 1]              \n",
      " 43                17  1    872448  models.common.RepNCSPELAN4              [256, 256, 256, 128, 2]       \n",
      " 44                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 45                -1  1      2816  models.common.DWConv                    [256, 256, 3, 1]              \n",
      " 46                -1  1     66048  models.common.Conv                      [256, 256, 1, 1]              \n",
      " 47                32  1   1069568  models.common.RepNCSPELAN4              [512, 512, 256, 128, 2]       \n",
      " 48                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 49                -1  1      5120  models.common.DWConv                    [512, 256, 3, 1]              \n",
      " 50                -1  1     66048  models.common.Conv                      [256, 256, 1, 1]              \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " 51                17  1    872448  models.common.RepNCSPELAN4              [256, 256, 256, 128, 2]       \n",
      " 52                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 53                -1  1      2816  models.common.DWConv                    [256, 256, 3, 1]              \n",
      " 54                -1  1     66048  models.common.Conv                      [256, 256, 1, 1]              \n",
      " 55[32, 35, 38, 17, 20, 23, 17, 20, 23, 42, 46, 50, 54]  1   9571034  models.yolo.DualDPanoptic               [80, 93, 32, 256, [512, 512, 512, 256, 512, 512, 256, 512, 512, 256, 256, 256, 256]]\n",
      " 56[17, 20, 23, 11, 44, 55]  1   8418304  models.yolo.Grit                        [[256, 512, 512, 512, 256, 256]]\n",
      " 57          [55, 56]  1         0  models.yolo.OutputLayer                 []                            \n",
      "gyolo summary: 1650 layers, 52601178 parameters, 52568106 gradients\n",
      "\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m AdamW(lr=0.004) with parameter groups 388 weight(decay=0.0), 420 weight(decay=0.0054687500000000005), 417 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/kinyiu/datasets/coco/train2017_stuff.cache' images and labels.\u001b[0m\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000010149.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000010149.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000010149.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000010149.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000010149.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000425369.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000425369.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000425369.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000425369.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000425369.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000508538.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000508538.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000508538.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000508538.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000508538.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000522365.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000522365.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000522365.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000522365.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000522365.jpg: 1 duplicate labels removed\n",
      "Load ann cache /kinyiu/datasets/coco/train_cap.cache\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/kinyiu/datasets/coco/train2017.cache' images and labels... 586\u001b[0m\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/kinyiu/datasets/coco/train2017_stuff.cache' images and labels.\u001b[0m\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000010149.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000010149.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000010149.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000010149.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000010149.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000425369.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000425369.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000425369.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000425369.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000425369.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000508538.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000508538.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000508538.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000508538.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000508538.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000522365.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000522365.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000522365.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000522365.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/train2017/000000522365.jpg: 1 duplicate labels removed\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning '/kinyiu/datasets/coco/val2017_stuff.cache' images and labels... 5\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/val2017/000000252219.jpg: 13 duplicate labels removed\n",
      "\u001b[34m\u001b[1mval: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/val2017/000000397133.jpg: 32 duplicate labels removed\n",
      "\u001b[34m\u001b[1mval: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/val2017/000000174482.jpg: 18 duplicate labels removed\n",
      "\u001b[34m\u001b[1mval: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/val2017/000000403385.jpg: 11 duplicate labels removed\n",
      "\u001b[34m\u001b[1mval: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/val2017/000000037777.jpg: 26 duplicate labels removed\n",
      "\u001b[34m\u001b[1mval: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/val2017/000000087038.jpg: 21 duplicate labels removed\n",
      "Load ann cache /kinyiu/datasets/coco/val_cap.cache\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning '/kinyiu/datasets/coco/val2017.cache' images and labels... 4952 fo\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning '/kinyiu/datasets/coco/val2017_stuff.cache' images and labels... 5\u001b[0m\n",
      "\u001b[34m\u001b[1mval: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/val2017/000000252219.jpg: 13 duplicate labels removed\n",
      "\u001b[34m\u001b[1mval: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/val2017/000000397133.jpg: 32 duplicate labels removed\n",
      "\u001b[34m\u001b[1mval: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/val2017/000000174482.jpg: 18 duplicate labels removed\n",
      "\u001b[34m\u001b[1mval: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/val2017/000000403385.jpg: 11 duplicate labels removed\n",
      "\u001b[34m\u001b[1mval: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/val2017/000000037777.jpg: 26 duplicate labels removed\n",
      "\u001b[34m\u001b[1mval: \u001b[0mWARNING ⚠️ /kinyiu/datasets/coco/images/val2017/000000087038.jpg: 21 duplicate labels removed\n",
      "Load ann cache /kinyiu/datasets/coco/train_cap.cache\n",
      "Load ann cache /kinyiu/datasets/coco/train_cap.cache\n",
      "Load ann cache /kinyiu/datasets/coco/train_cap.cache\n",
      "Load ann cache /kinyiu/datasets/coco/train_cap.cache\n",
      "Load ann cache /kinyiu/datasets/coco/train_cap.cache\n",
      "Load ann cache /kinyiu/datasets/coco/train_cap.cache\n",
      "Load ann cache /kinyiu/datasets/coco/train_cap.cache\n",
      "Load ann cache /kinyiu/datasets/coco/train_cap.cache\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Load ann cache /kinyiu/datasets/coco/train_cap.cache\n",
      "Image sizes 640 train, 640 val\n",
      "Using 70 dataloader workers\n",
      "Logging results to \u001b[1mruns/train-cap/gyolo\u001b[0m\n",
      "Starting training for 60 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   seg_loss   cls_loss   dfl_loss   fcl_loss   dic_loss   cap_loss  Instances       Size\n",
      "       0/59      20.9G       1.72     0.5921      2.612      1.575    0.06265   \n",
      "\n",
      "      Epoch    GPU_mem   box_loss   seg_loss   cls_loss   dfl_loss   fcl_loss   dic_loss   cap_loss  Instances       Size\n",
      "       1/59      21.8G      1.511     0.5145      2.044      1.428    0.05651   \n",
      "\n",
      "      Epoch    GPU_mem   box_loss   seg_loss   cls_loss   dfl_loss   fcl_loss   dic_loss   cap_loss  Instances       Size\n",
      "       2/59      21.8G      1.449     0.4865      1.865      1.383    0.05366   \n",
      "\n",
      "      Epoch    GPU_mem   box_loss   seg_loss   cls_loss   dfl_loss   fcl_loss   dic_loss   cap_loss  Instances       Size\n",
      "       3/59      21.8G      1.412     0.4701      1.765      1.358    0.05227   \n",
      "\n",
      "      Epoch    GPU_mem   box_loss   seg_loss   cls_loss   dfl_loss   fcl_loss   dic_loss   cap_loss  Instances       Size\n",
      "       4/59      21.8G      1.391     0.4622      1.708      1.345    0.05121   \n",
      "\n",
      "      Epoch    GPU_mem   box_loss   seg_loss   cls_loss   dfl_loss   fcl_loss   dic_loss   cap_loss  Instances       Size\n",
      "       5/59      21.8G      1.375     0.4573       1.67      1.332    0.05062   \n",
      "\n",
      "      Epoch    GPU_mem   box_loss   seg_loss   cls_loss   dfl_loss   fcl_loss   dic_loss   cap_loss  Instances       Size\n",
      "       6/59      21.8G       1.37     0.4526      1.643      1.326    0.05034   \n",
      "\n",
      "      Epoch    GPU_mem   box_loss   seg_loss   cls_loss   dfl_loss   fcl_loss   dic_loss   cap_loss  Instances       Size\n",
      "       7/59      21.8G      1.357     0.4488      1.615      1.317    0.04978   \n",
      "\n",
      "      Epoch    GPU_mem   box_loss   seg_loss   cls_loss   dfl_loss   fcl_loss   dic_loss   cap_loss  Instances       Size\n",
      "       8/59      21.8G      1.352     0.4455      1.598      1.315    0.04965   \n",
      "\n",
      "      Epoch    GPU_mem   box_loss   seg_loss   cls_loss   dfl_loss   fcl_loss   dic_loss   cap_loss  Instances       Size\n",
      "       9/59      21.8G      1.342     0.4434      1.582      1.309    0.04948   \n",
      "\n",
      "      Epoch    GPU_mem   box_loss   seg_loss   cls_loss   dfl_loss   fcl_loss   dic_loss   cap_loss  Instances       Size\n",
      "      10/59      21.8G      1.338     0.4396      1.568      1.306    0.04906   \n",
      "\n",
      "      Epoch    GPU_mem   box_loss   seg_loss   cls_loss   dfl_loss   fcl_loss   dic_loss   cap_loss  Instances       Size\n",
      "      11/59      21.8G      1.334     0.4376       1.56      1.304    0.04891   \n",
      "\n",
      "      Epoch    GPU_mem   box_loss   seg_loss   cls_loss   dfl_loss   fcl_loss   dic_loss   cap_loss  Instances       Size\n",
      "      12/59      21.8G       1.33     0.4361      1.541      1.298    0.04847   \n",
      "\n",
      "      Epoch    GPU_mem   box_loss   seg_loss   cls_loss   dfl_loss   fcl_loss   dic_loss   cap_loss  Instances       Size\n",
      "      13/59      21.8G      1.327     0.4341      1.537      1.297    0.04832   \n",
      "\n",
      "      Epoch    GPU_mem   box_loss   seg_loss   cls_loss   dfl_loss   fcl_loss   dic_loss   cap_loss  Instances       Size\n",
      "      14/59      21.8G      1.327     0.4332      1.527      1.295    0.04795   \n",
      "\n",
      "      Epoch    GPU_mem   box_loss   seg_loss   cls_loss   dfl_loss   fcl_loss   dic_loss   cap_loss  Instances       Size\n",
      "      15/59      21.8G      1.321     0.4316      1.524      1.292    0.04805   \n",
      "\n",
      "      Epoch    GPU_mem   box_loss   seg_loss   cls_loss   dfl_loss   fcl_loss   dic_loss   cap_loss  Instances       Size\n",
      "      16/59      21.8G      1.318     0.4295      1.516      1.292    0.04809   \n",
      "\n",
      "      Epoch    GPU_mem   box_loss   seg_loss   cls_loss   dfl_loss   fcl_loss   dic_loss   cap_loss  Instances       Size\n",
      "      17/59      21.8G      1.318      0.431      1.512      1.289     0.0479   \n",
      "\n",
      "      Epoch    GPU_mem   box_loss   seg_loss   cls_loss   dfl_loss   fcl_loss   dic_loss   cap_loss  Instances       Size\n",
      "      18/59      21.8G      1.317     0.4312      1.511       1.29    0.04832   \n",
      "\n",
      "      Epoch    GPU_mem   box_loss   seg_loss   cls_loss   dfl_loss   fcl_loss   dic_loss   cap_loss  Instances       Size\n",
      "      19/59      21.8G       1.31     0.4267      1.501      1.287     0.0479   \n",
      "\n",
      "      Epoch    GPU_mem   box_loss   seg_loss   cls_loss   dfl_loss   fcl_loss   dic_loss   cap_loss  Instances       Size\n",
      "      20/59      21.8G      1.311     0.4277      1.498      1.287    0.04791   \n",
      "\n",
      "      Epoch    GPU_mem   box_loss   seg_loss   cls_loss   dfl_loss   fcl_loss   dic_loss   cap_loss  Instances       Size\n",
      "      21/59      21.8G      1.305     0.4255      1.489      1.285    0.04777   ^C\n",
      "WARNING:torch.distributed.elastic.agent.server.api:Received 2 death signal, shutting down workers\n",
      "WARNING:torch.distributed.elastic.multiprocessing.api:Sending process 1545147 closing signal SIGINT\n",
      "WARNING:torch.distributed.elastic.multiprocessing.api:Sending process 1545148 closing signal SIGINT\n",
      "WARNING:torch.distributed.elastic.multiprocessing.api:Sending process 1545149 closing signal SIGINT\n",
      "WARNING:torch.distributed.elastic.multiprocessing.api:Sending process 1545150 closing signal SIGINT\n",
      "WARNING:torch.distributed.elastic.multiprocessing.api:Sending process 1545151 closing signal SIGINT\n",
      "WARNING:torch.distributed.elastic.multiprocessing.api:Sending process 1545152 closing signal SIGINT\n",
      "WARNING:torch.distributed.elastic.multiprocessing.api:Sending process 1545153 closing signal SIGINT\n",
      "WARNING:torch.distributed.elastic.multiprocessing.api:Sending process 1545154 closing signal SIGINT\n",
      "WARNING:torch.distributed.elastic.multiprocessing.api:Sending process 1545155 closing signal SIGINT\n",
      "WARNING:torch.distributed.elastic.multiprocessing.api:Sending process 1545156 closing signal SIGINT\n",
      "Exception in thread Thread-22:\n",
      "Traceback (most recent call last):\n",
      "  File \"/opt/conda/lib/python3.8/threading.py\", line 932, in _bootstrap_inner\n",
      "    self.run()\n",
      "  File \"/opt/conda/lib/python3.8/threading.py\", line 870, in run\n",
      "    self._target(*self._args, **self._kwargs)\n",
      "  File \"/opt/conda/lib/python3.8/site-packages/torch/utils/data/_utils/pin_memory.py\", line 28, in _pin_memory_loop\n",
      "    r = in_queue.get(timeout=MP_STATUS_CHECK_INTERVAL)\n",
      "  File \"/opt/conda/lib/python3.8/multiprocessing/queues.py\", line 116, in get\n",
      "    return _ForkingPickler.loads(res)\n",
      "  File \"/opt/conda/lib/python3.8/site-packages/torch/multiprocessing/reductions.py\", line 295, in rebuild_storage_fd\n",
      "    fd = df.detach()\n",
      "  File \"/opt/conda/lib/python3.8/multiprocessing/resource_sharer.py\", line 57, in detach\n",
      "    with _resource_sharer.get_connection(self._id) as conn:\n",
      "  File \"/opt/conda/lib/python3.8/multiprocessing/resource_sharer.py\", line 87, in get_connection\n",
      "Exception in thread     Thread-22c = Client(address, authkey=process.current_process().authkey):\n",
      "\n",
      "  File \"/opt/conda/lib/python3.8/multiprocessing/connection.py\", line 502, in Client\n",
      "Traceback (most recent call last):\n",
      "  File \"/opt/conda/lib/python3.8/threading.py\", line 932, in _bootstrap_inner\n",
      "    c = SocketClient(address)\n",
      "  File \"/opt/conda/lib/python3.8/multiprocessing/connection.py\", line 630, in SocketClient\n",
      "    s.connect(address)\n",
      "FileNotFoundError: [Errno 2] No such file or directory\n",
      "    self.run()\n",
      "  File \"/opt/conda/lib/python3.8/threading.py\", line 870, in run\n",
      "    self._target(*self._args, **self._kwargs)\n",
      "  File \"/opt/conda/lib/python3.8/site-packages/torch/utils/data/_utils/pin_memory.py\", line 28, in _pin_memory_loop\n",
      "    r = in_queue.get(timeout=MP_STATUS_CHECK_INTERVAL)\n",
      "  File \"/opt/conda/lib/python3.8/multiprocessing/queues.py\", line 116, in get\n",
      "    return _ForkingPickler.loads(res)\n",
      "  File \"/opt/conda/lib/python3.8/site-packages/torch/multiprocessing/reductions.py\", line 295, in rebuild_storage_fd\n",
      "    fd = df.detach()\n",
      "  File \"/opt/conda/lib/python3.8/multiprocessing/resource_sharer.py\", line 57, in detach\n",
      "    with _resource_sharer.get_connection(self._id) as conn:\n",
      "  File \"/opt/conda/lib/python3.8/multiprocessing/resource_sharer.py\", line 87, in get_connection\n",
      "    c = Client(address, authkey=process.current_process().authkey)\n",
      "  File \"/opt/conda/lib/python3.8/multiprocessing/connection.py\", line 502, in Client\n",
      "    c = SocketClient(address)\n",
      "  File \"/opt/conda/lib/python3.8/multiprocessing/connection.py\", line 630, in SocketClient\n",
      "    s.connect(address)\n",
      "FileNotFoundError: [Errno 2] No such file or directory\n",
      "Traceback (most recent call last):\n",
      "  File \"caption/train.py\", line 752, in <module>\n",
      "Traceback (most recent call last):\n",
      "  File \"caption/train.py\", line 752, in <module>\n",
      "Traceback (most recent call last):\n",
      "  File \"caption/train.py\", line 752, in <module>\n",
      "Traceback (most recent call last):\n",
      "  File \"caption/train.py\", line 752, in <module>\n",
      "Traceback (most recent call last):\n",
      "  File \"caption/train.py\", line 752, in <module>\n",
      "Traceback (most recent call last):\n",
      "  File \"caption/train.py\", line 752, in <module>\n",
      "Traceback (most recent call last):\n",
      "  File \"caption/train.py\", line 752, in <module>\n",
      "Traceback (most recent call last):\n",
      "  File \"caption/train.py\", line 752, in <module>\n",
      "    main(opt)\n",
      "  File \"caption/train.py\", line 649, in main\n",
      "Traceback (most recent call last):\n",
      "  File \"caption/train.py\", line 752, in <module>\n",
      "    train(opt.hyp, opt, device, callbacks)\n",
      "  File \"caption/train.py\", line 392, in train\n",
      "    scaler.scale(loss).backward()\n",
      "  File \"/opt/conda/lib/python3.8/site-packages/torch/_tensor.py\", line 352, in backward\n",
      "    torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)\n",
      "  File \"/opt/conda/lib/python3.8/site-packages/torch/autograd/__init__.py\", line 173, in backward\n",
      "    Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass\n",
      "KeyboardInterrupt\n",
      "    main(opt)\n",
      "  File \"caption/train.py\", line 649, in main\n",
      "    train(opt.hyp, opt, device, callbacks)\n",
      "  File \"caption/train.py\", line 392, in train\n",
      "    scaler.scale(loss).backward()\n",
      "  File \"/opt/conda/lib/python3.8/site-packages/torch/_tensor.py\", line 352, in backward\n",
      "    torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)\n",
      "  File \"/opt/conda/lib/python3.8/site-packages/torch/autograd/__init__.py\", line 173, in backward\n",
      "      21/59      21.8G      1.305     0.4255      1.489      1.285    0.04777   \n",
      "    Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass\n",
      "KeyboardInterrupt\n",
      "Traceback (most recent call last):\n",
      "  File \"caption/train.py\", line 752, in <module>\n",
      "    main(opt)\n",
      "  File \"caption/train.py\", line 649, in main\n",
      "    main(opt)\n",
      "  File \"caption/train.py\", line 649, in main\n",
      "    train(opt.hyp, opt, device, callbacks)\n",
      "  File \"caption/train.py\", line 392, in train\n",
      "    train(opt.hyp, opt, device, callbacks)\n",
      "  File \"caption/train.py\", line 392, in train\n",
      "    scaler.scale(loss).backward()\n",
      "  File \"/opt/conda/lib/python3.8/site-packages/torch/_tensor.py\", line 352, in backward\n",
      "    scaler.scale(loss).backward()\n",
      "      File \"/opt/conda/lib/python3.8/site-packages/torch/_tensor.py\", line 352, in backward\n",
      "main(opt)    torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)\n",
      "\n",
      "  File \"/opt/conda/lib/python3.8/site-packages/torch/autograd/__init__.py\", line 173, in backward\n",
      "  File \"caption/train.py\", line 649, in main\n",
      "    torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)\n",
      "  File \"/opt/conda/lib/python3.8/site-packages/torch/autograd/__init__.py\", line 173, in backward\n",
      "    Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass\n",
      "KeyboardInterrupt\n",
      "    train(opt.hyp, opt, device, callbacks)\n",
      "  File \"caption/train.py\", line 392, in train\n",
      "    Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass\n",
      "KeyboardInterrupt\n",
      "    scaler.scale(loss).backward()\n",
      "  File \"/opt/conda/lib/python3.8/site-packages/torch/_tensor.py\", line 352, in backward\n",
      "    main(opt)\n",
      "  File \"caption/train.py\", line 649, in main\n",
      "    torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)\n",
      "  File \"/opt/conda/lib/python3.8/site-packages/torch/autograd/__init__.py\", line 173, in backward\n",
      "        Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward passtrain(opt.hyp, opt, device, callbacks)\n",
      "\n",
      "  File \"caption/train.py\", line 392, in train\n",
      "KeyboardInterrupt\n",
      "    scaler.scale(loss).backward()\n",
      "  File \"/opt/conda/lib/python3.8/site-packages/torch/_tensor.py\", line 352, in backward\n",
      "    torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)\n",
      "  File \"/opt/conda/lib/python3.8/site-packages/torch/autograd/__init__.py\", line 173, in backward\n",
      "    Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass\n",
      "KeyboardInterrupt\n",
      "    main(opt)\n",
      "  File \"caption/train.py\", line 649, in main\n",
      "    train(opt.hyp, opt, device, callbacks)\n",
      "  File \"caption/train.py\", line 392, in train\n",
      "    scaler.scale(loss).backward()\n",
      "  File \"/opt/conda/lib/python3.8/site-packages/torch/_tensor.py\", line 352, in backward\n",
      "    torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)\n",
      "  File \"/opt/conda/lib/python3.8/site-packages/torch/autograd/__init__.py\", line 173, in backward\n",
      "    Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass\n",
      "KeyboardInterrupt\n",
      "    main(opt)\n",
      "  File \"caption/train.py\", line 649, in main\n",
      "    train(opt.hyp, opt, device, callbacks)\n",
      "  File \"caption/train.py\", line 392, in train\n",
      "    scaler.scale(loss).backward()\n",
      "  File \"/opt/conda/lib/python3.8/site-packages/torch/_tensor.py\", line 352, in backward\n",
      "    torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)\n",
      "  File \"/opt/conda/lib/python3.8/site-packages/torch/autograd/__init__.py\", line 173, in backward\n",
      "    Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass\n",
      "KeyboardInterrupt\n",
      "    main(opt)\n",
      "  File \"caption/train.py\", line 649, in main\n",
      "    train(opt.hyp, opt, device, callbacks)\n",
      "  File \"caption/train.py\", line 392, in train\n",
      "    scaler.scale(loss).backward()\n",
      "  File \"/opt/conda/lib/python3.8/site-packages/torch/_tensor.py\", line 352, in backward\n",
      "    torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)\n",
      "  File \"/opt/conda/lib/python3.8/site-packages/torch/autograd/__init__.py\", line 173, in backward\n",
      "    Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass\n",
      "KeyboardInterrupt\n",
      "    main(opt)\n",
      "  File \"caption/train.py\", line 649, in main\n",
      "    train(opt.hyp, opt, device, callbacks)\n",
      "  File \"caption/train.py\", line 392, in train\n",
      "    scaler.scale(loss).backward()\n",
      "  File \"/opt/conda/lib/python3.8/site-packages/torch/_tensor.py\", line 352, in backward\n",
      "    torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)\n",
      "  File \"/opt/conda/lib/python3.8/site-packages/torch/autograd/__init__.py\", line 173, in backward\n",
      "    Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass\n",
      "KeyboardInterrupt\n"
     ]
    }
   ],
   "source": [
    "!CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7,8,9 python -m torch.distributed.launch --nproc_per_node=10 --master_port=2882 caption/train.py --cfg models/caption/gyolo.yaml --weights '' --data data/coco.yaml --hyp data/hyps/hyp.scratch-cap.yaml --img 640 --batch 70 --epochs 60 --device 0,1,2,3,4,5,6,7,8,9 --optimizer AdamW --flat-cos-lr --close-mosaic 2 --no-overlap --save-period 1 --noval --noplots --name gyolo"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "46ef0770",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
