{
 "cells": [
  {
   "cell_type": "raw",
   "id": "ae99c0ff-77fe-4439-94b5-bd34b9a5c37e",
   "metadata": {},
   "source": [
    "# Disabled (raw) cell: full training pipeline — generate 2D data and masks, then train.\n",
    "%cd /project/train/src_repo/\n",
    "!python3 gen_2d.py\n",
    "!python3 gen_mask.py\n",
    "\n",
    "!python3 trainds.py --weights /project/train/src_repo/runs/yolov5s.pt --device 0  --batch-size=8 --workers 4 \\\n",
    "--project /project/train/models/ --name o  --epoch 30\n",
    "# --exist-ok\n",
    "# --weights runs/best.pt --data data/car.yaml --workers 1 --batch-size 16 --project /project/train/models/"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d002913b-15c9-4945-94cf-5bfc08320a50",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sync src_repo with upstream. NOTE: reset --hard discards any local, uncommitted edits.\n",
    "!git reset --hard origin/master\n",
    "!git pull\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "775d4d6c-7665-4b8b-a856-c747846ceeff",
   "metadata": {},
   "outputs": [],
   "source": [
    "%cd /project/train/src_repo/\n",
    "# Deploy sources into the ev_sdk image, then replace the copied ji.py with a\n",
    "# hard link (ln without -s) so edits to ./ji.py are seen without re-copying.\n",
    "!cp -r ./* /project/ev_sdk/src/\n",
    "!rm /project/ev_sdk/src/ji.py\n",
    "!ln ./ji.py /project/ev_sdk/src/ji.py\n",
    "!pwd"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "803a6940-442f-4c67-862b-fec0c4e4d829",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Test / training: run the training launcher script.\n",
    "!cd && sh /project/train/src_repo/run.sh\n",
    "# sh project"
   ]
  },
  {
   "cell_type": "raw",
   "id": "b9332e87-4692-4d9a-af20-62380bced48a",
   "metadata": {},
   "source": [
    "# Disabled (raw) cell: run dete_color.py (presumably a color-detection test — not executed).\n",
    "%cd /project/train/src_repo/ds5/\n",
    "!python3 dete_color.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "bdf8205b-d595-4986-863f-6d0f6a171a5d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "xmls: 41\n",
      "xmls: 5\n",
      "func is running 0:00:00.055693 s\n",
      "\u001b[34m\u001b[1mtrainds: \u001b[0mweights=/project/train/models/best.pt, cfg=models/yolov5s.yaml, segcfg=models/segheads.yaml, data=data/1441.yaml, hyp=data/hyps/hyp.scratch.yaml, epochs=300, batch_size=16, imgsz=512, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, evolve=None, bucket=, cache=None, image_weights=False, device=0, multi_scale=False, single_cls=False, adam=False, sync_bn=False, workers=1, project=runs/train, name=exp, exist_ok=False, quad=False, linear_lr=False, label_smoothing=0.0, patience=100, freeze=0, save_period=-1, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n",
      "YOLOv5 🚀 15956ea torch 1.10.0+cu102 CUDA:0 (GeForce RTX 2080 Ti, 11019MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.1, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mWeights & Biases: \u001b[0mrun 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs (RECOMMENDED)\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "using pretained weights...\n",
      "Overriding model.yaml nc=80 with nc=10\n",
      "\n",
      "                 from  n    params  module                                  arguments                     \n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
      " 24      [17, 20, 23]  1   7349639  models.yolodhs.Detect                   [10, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
      " 25                16  1    295168  models.common.Conv                      [256, 128, 3, 1]              \n",
      " 26                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 27                -1  1     74496  models.common.C3                        [128, 128, 1, False]          \n",
      " 28                -1  1     73856  models.common.Conv                      [128, 64, 3, 1]               \n",
      " 29                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 30                -1  1     18496  models.common.Conv                      [64, 32, 3, 1]                \n",
      " 31                -1  1      4800  models.common.C3                        [32, 32, 1, False]            \n",
      " 32                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
      " 33                -1  1      1156  torch.nn.modules.conv.Conv2d            [32, 4, 3, 1, 1]              \n",
      "Model Summary: 401 layers, 14823755 parameters, 14823755 gradients, 73.8 GFLOPs\n",
      "\n",
      "Transferred 530/531 items from /project/train/models/best.pt\n",
      "Scaled weight_decay = 0.0005\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD with parameter groups 85 weight, 95 weight (no decay), 95 bias\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/home/data/1441.cache' images and labels... 41 found, 0 missing\u001b[0m\n",
      "False Counter({0: 92036478, 2: 5158665, 1: 1036872, 3: 6449})\n",
      "cls balance : [160, 12, 18, 57, 8, 13, 2, 15, 27, 12]\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning '/home/data/1442.cache' images and labels... 5 found, 0 missing, 0\u001b[0m\n",
      "True Counter({0: 10043946, 3: 324054})\n",
      "Plotting labels to runs/train/exp21/labels.jpg... \n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.65 anchors/target, 1.000 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Image sizes 512 train, 512 val\n",
      "Using 1 dataloader workers\n",
      "Logging results to \u001b[1mruns/train/exp21\u001b[0m\n",
      "Starting training for 300 epochs...\n",
      "\n",
      "     Epoch   gpu_mem       box       obj       cls    labels  img_size\n",
      "218/299      7.71G   0.04844   0.03901   0.03415       218       512\n",
      "Loss/Iou 0.1743379831314087 0.24207001181009008\n",
      "【 map50 0.4342561395733817 map : 0.2981396037091511\n",
      "\n",
      "     Epoch   gpu_mem       box       obj       cls    labels  img_size\n",
      "219/299      8.93G   0.04813    0.0454   0.03277       256       512\n",
      "Loss/Iou 0.1685260385274887 0.24233288273635628\n",
      "【 map50 0.4290522307802846 map : 0.29493557538314324\n",
      "\n",
      "     Epoch   gpu_mem       box       obj       cls    labels  img_size\n",
      "220/299      8.93G   0.04547    0.0414   0.03145       250       512\n",
      "Loss/Iou 0.16667185723781586 0.2422924963384416\n",
      "【 map50 0.43252227071202404 map : 0.2964165183967054\n",
      "\n",
      "     Epoch   gpu_mem       box       obj       cls    labels  img_size\n",
      "221/299      8.93G   0.04565   0.04432   0.02777       283       512\n",
      "^C\n",
      "Traceback (most recent call last):\n",
      "  File \"trainds.py\", line 685, in <module>\n",
      "    main(opt)\n",
      "  File \"trainds.py\", line 582, in main\n",
      "    train(opt.hyp, opt, device, callbacks)\n",
      "  File \"trainds.py\", line 399, in train\n",
      "    ave_loss, mean_IoU, IoU_array = segval.validate(roadseg_val_loader, model, segnc,SegLoss )\n",
      "  File \"/project/train/yolov5ds/segval.py\", line 93, in validate\n",
      "    for idx, batch in enumerate(testloader):\n",
      "  File \"/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py\", line 521, in __next__\n",
      "    data = self._next_data()\n",
      "  File \"/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py\", line 1186, in _next_data\n",
      "    idx, data = self._get_data()\n",
      "  File \"/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py\", line 1142, in _get_data\n",
      "    success, data = self._try_get_data()\n",
      "  File \"/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py\", line 990, in _try_get_data\n",
      "    data = self._data_queue.get(timeout=timeout)\n",
      "  File \"/usr/lib/python3.7/queue.py\", line 179, in get\n",
      "    self.not_empty.wait(remaining)\n",
      "  File \"/usr/lib/python3.7/threading.py\", line 300, in wait\n",
      "    gotit = waiter.acquire(True, timeout)\n",
      "KeyboardInterrupt\n"
     ]
    }
   ],
   "source": [
    "# Launch training via the yolov5ds wrapper script (the captured run above was interrupted with ^C).\n",
    "!sh /project/train/yolov5ds/r.sh"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e834aae9-ad4a-4f55-b98e-67d3dc026eec",
   "metadata": {},
   "outputs": [],
   "source": [
    "# DANGER: irreversibly deletes every saved model checkpoint under /project/train/models.\n",
    "!rm -r /project/train/models"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5154a95c-8e5c-41cc-b934-41fa178620c3",
   "metadata": {},
   "outputs": [],
   "source": [
    "import psutil\n",
    "import os,datetime,time  # unused here; kept so later cells relying on these names still run\n",
    "\n",
    "\n",
    "def getMemCpu():\n",
    "    \"\"\"Return a one-line summary of host memory and CPU usage.\n",
    "\n",
    "    Memory figures are whole GiB (total and available); CPU load is\n",
    "    sampled over a 1-second interval via psutil.cpu_percent.\n",
    "    \"\"\"\n",
    "    vm = psutil.virtual_memory()\n",
    "    total_gib = vm.total >> 30      # total physical memory, GiB\n",
    "    avail_gib = vm.available >> 30  # available memory, GiB\n",
    "    memory = \"Memory total %dG/%dG usage:%d \" % (total_gib, avail_gib, int(round(vm.percent))) + \"%\" + \" \"\n",
    "    cpu = \"CPU:%0.2f\" % psutil.cpu_percent(interval=1) + \"%\"\n",
    "    return memory + cpu\n",
    "\n",
    "\n",
    "ret = getMemCpu()\n",
    "print(ret)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b7c963e5-14f9-402f-89fc-ceb82e79f3eb",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Use %pip (not !pip) so the install targets the running kernel's environment.\n",
    "# TODO: pin a version for reproducibility.\n",
    "%pip install GPUtil"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "49b86729-e6fb-47b7-84d5-040ee34c5115",
   "metadata": {},
   "outputs": [],
   "source": [
    "import time\n",
    "\n",
    "# Show the current wall-clock time as HH:MM:SS.\n",
    "now_struct = time.localtime(time.time())\n",
    "print(time.strftime('%H:%M:%S', now_struct))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "a38144b0-b07b-46e5-aa9e-7a32f14c6fb0",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "xmls: 41\n",
      "xmls: 5\n",
      "func is running 0:00:00.058206 s\n",
      "\u001b[34m\u001b[1mtrainds: \u001b[0mweights=./runs/train/exp11/weights/best.pt, cfg=models/yolov5s.yaml, segcfg=models/segheads.yaml, data=data/1441.yaml, hyp=data/hyps/hyp.scratch.yaml, epochs=300, batch_size=1, imgsz=512, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, evolve=None, bucket=, cache=None, image_weights=False, device=0, multi_scale=False, single_cls=False, adam=False, sync_bn=False, workers=1, project=runs/train, name=exp, exist_ok=False, quad=False, linear_lr=False, label_smoothing=0.0, patience=100, freeze=0, save_period=-1, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n",
      "YOLOv5 🚀 15956ea torch 1.10.0+cu102 CUDA:0 (GeForce RTX 2080 Ti, 11019MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.1, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mWeights & Biases: \u001b[0mrun 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs (RECOMMENDED)\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
      "using pretained weights...\n",
      "^C\n",
      "Traceback (most recent call last):\n",
      "  File \"trainds.py\", line 687, in <module>\n",
      "    main(opt)\n",
      "  File \"trainds.py\", line 584, in main\n",
      "    train(opt.hyp, opt, device, callbacks)\n",
      "  File \"trainds.py\", line 123, in train\n",
      "    ckpt = torch.load(weights, map_location=device)  # load checkpoint\n",
      "  File \"/usr/local/lib/python3.7/dist-packages/torch/serialization.py\", line 594, in load\n",
      "    with _open_file_like(f, 'rb') as opened_file:\n",
      "  File \"/usr/local/lib/python3.7/dist-packages/torch/serialization.py\", line 230, in _open_file_like\n",
      "    return _open_file(name_or_buffer, mode)\n",
      "  File \"/usr/local/lib/python3.7/dist-packages/torch/serialization.py\", line 211, in __init__\n",
      "    super(_open_file, self).__init__(open(name, mode))\n",
      "FileNotFoundError: [Errno 2] No such file or directory: 'runs/train/exp11/weights/best.pt'\n"
     ]
    }
   ],
   "source": [
    "# Regenerate the 2D dataset, then resume training from exp11's best.pt.\n",
    "# NOTE: the captured output above shows that checkpoint was missing (FileNotFoundError).\n",
    "!python3 gen_2d.py \n",
    "!python3 trainds.py --data data/1441.yaml --cfg models/yolov5s.yaml --weights ./runs/train/exp11/weights/best.pt  --device 0  --batch-size=1 --workers 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "28b1b40e-c703-4c6d-96d5-785faa2ead09",
   "metadata": {},
   "outputs": [],
   "source": [
    "# !pip install pynvml\n",
    "\n",
    "from multiprocessing import shared_memory\n",
    "\n",
    "\n",
    "shm = shared_memory.SharedMemory(name='shmTest555666', create=True, size=10)\n",
    "\n",
    "buf = shm.buf\n",
    "print('Process A, buf len: {}, {}'.format(len(buf), shm.size))\n",
    "print('shm name: {}'.format(shm.name))\n",
    "\n",
    "buf[:4] = bytearray([22, 33, 44, 55])\n",
    "buf[4] = 100\n",
    "\n",
    "\n",
    "while True:\n",
    "    if buf[4] == 200: # 当buf[4]变成200，就结束循环\n",
    "        break\n",
    "\n",
    "\n",
    "shm.close() # 关闭共享内存\n",
    "shm.unlink() # 释放共享内存，也可以由B进程释放\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ebc903ad-e9e6-4fc5-a984-0e489fa53709",
   "metadata": {},
   "outputs": [],
   "source": [
    "import GPUtil\n",
    "import time\n",
    "time_x = []\n",
    "gpu_y = []\n",
    "\n",
    "def gpu_util_timer(self):\n",
    "    for n in range(10):\n",
    "        Graph_Util.gpu_y.append(GPUtil.showUtilization())\n",
    "        Graph_Util.time_x.append(n)\n",
    "        time.sleep(1)\n",
    "    print('gpu done')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "85af138f-292c-49d6-aed3-ddcd637bcbfa",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.5"
  },
  "vscode": {
   "interpreter": {
    "hash": "916dbcbb3f70747c44a77c7bcd40155683ae19c65e1c03b4aa3499c5328201f1"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
