{
 "cells": [
  {
   "cell_type": "markdown",
   "source": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
    "### 检查下版本，看是否安装成功"
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "source": [
    "# !python -c \"import mindspore;print(mindspore.__version__)\""
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
    "### 先做遥感数据的语义分割吧\n",
    ">提取下遥感数据"
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [
    "import moxing as mox\r\n",
    "mox.file.copy_parallel('obs://lawther-bisai/dataset/YG56723','./data')"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
    "### 上面的步骤事从桶里下载数据，如果要进行复现请在/home/ma-user/work下建立data文件夹，并将里面放入测试数据，格式如下\r\n",
    "- data\r\n",
    "    - images\r\n",
    "    - labels\r\n",
    "\r\n",
    "images中存放图片，label中存放标签。\r\n"
   ],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
    "### 写个脚本处理下数据"
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "source": [
    "cd .."
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "/home/ma-user/work\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "source": [
    "import os\r\n",
    "import random\r\n",
    "\r\n",
    "\r\n",
    "random.seed(0)\r\n",
    "root = './data'\r\n",
    "img_dir = os.path.join(root, 'images')\r\n",
    "mask_dir = os.path.join(root, 'labels')\r\n",
    "\r\n",
    "img_lists = os.listdir(img_dir)\r\n",
    "mask_lists = os.listdir(mask_dir)\r\n",
    "\r\n",
    "print(f'There are {len(img_lists)} images')\r\n",
    "lists = [fn[:-4] for fn in img_lists]\r\n",
    "print(len(lists))\r\n",
    "num_train = int(len(img_lists) * 0.9)\r\n",
    "\r\n",
    "with open(os.path.join(root,'vocdata.txt'), 'w') as f:\r\n",
    "    for index in lists[:num_train]:\r\n",
    "        f.writelines(f'images/{index}.png labels/{index}_mask.png\\n')\r\n",
    "with open(os.path.join(root,'eval.txt'), 'w') as f:\r\n",
    "    for index in lists[num_train:]:\r\n",
    "        f.writelines(f'images/{index}.png labels/{index}_mask.png\\n')"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "There are 20001 images\n",
      "20001\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
    "### 删除vocdata.txt的第一行"
   ],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
    "### 把deeplabv3代码拉过来"
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [
    "import moxing as mox\r\n",
    "mox.file.copy_parallel('obs://lawther-bisai/deeplabv3plus/','deeplabv3')"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "source": [
    "cd deeplabv3"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "/home/ma-user/work/deeplabv3\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [
    "!pip install -r requirements.txt"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [
    "!pip install opencv-python"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "source": [
    "pwd"
   ],
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": [
       "'/home/ma-user/work'"
      ]
     },
     "metadata": {},
     "execution_count": 1
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "source": [
    "cd data"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "/home/ma-user/work/data\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "source": [
    "import numpy as np\r\n",
    "import cv2"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "source": [
    "with open('/home/ma-user/work/data/eval.txt') as f:\r\n",
    "    img_lst = f.readlines()\r\n",
    "with open('/home/ma-user/work/data/vocdata.txt') as f:\r\n",
    "    msk_lst = f.readlines()"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "source": [
    "for i in img_lst:\r\n",
    "    img_path, msk_path = i.strip().split(' ')\r\n",
    "#     print(img_path)\r\n",
    "    img_ = cv2.imread(img_path)\r\n",
    "    msk_ = cv2.imread(msk_path, cv2.IMREAD_GRAYSCALE)\r\n",
    "    msk_ = (msk_/255)\r\n",
    "    cv2.imwrite(msk_path,msk_)"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "source": [
    "for i in msk_lst:\r\n",
    "    img_path, msk_path = i.strip().split(' ')\r\n",
    "#     print(img_path)\r\n",
    "    img_ = cv2.imread(img_path)\r\n",
    "    msk_ = cv2.imread(msk_path, cv2.IMREAD_GRAYSCALE)\r\n",
    "    msk_ = (msk_/255)\r\n",
    "    cv2.imwrite(msk_path,msk_)"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [
    "ls"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "source": [
    "!python ./src/tools/get_dataset_mindrecord.py --data_root='/home/ma-user/work/data' --data_lst='/home/ma-user/work/data/vocdata.txt' --dst_path='/home/ma-user/work/deeplabv3/MIND.mindrecord' --num_shards=1 --shuffle=True "
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "number of samples: 17999\n",
      "number of samples written: 1000\n",
      "number of samples written: 2000\n",
      "number of samples written: 3000\n",
      "number of samples written: 4000\n",
      "number of samples written: 5000\n",
      "number of samples written: 6000\n",
      "number of samples written: 7000\n",
      "number of samples written: 8000\n",
      "number of samples written: 9000\n",
      "number of samples written: 10000\n",
      "number of samples written: 11000\n",
      "number of samples written: 12000\n",
      "number of samples written: 13000\n",
      "number of samples written: 14000\n",
      "number of samples written: 15000\n",
      "number of samples written: 16000\n",
      "number of samples written: 17000\n",
      "number of samples written: 17999\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [
    "# !wget https://download.pytorch.org/models/resnet101-5d3b4d8f.pth"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
    "### 下需要把torch预训练参数转换过来，所以装一个torch"
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [
    "!pip install torch"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [
    "!python convert.py"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "source": [
    "cd .."
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "/home/ma-user/work\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "source": [
    "cd deeplabv3"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "/home/ma-user/work/deeplabv3\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "source": [
    "!python train.py --train_dir=./ckpt  \\\n",
    "                              --data_file='MIND.mindrecord'  \\\n",
    "                              --train_epochs=300  \\\n",
    "                                --batch_size=32  \\\n",
    "                                --crop_size=256  \\\n",
    "                                --base_lr=0.08  \\\n",
    "                                --lr_type=cos  \\\n",
    "                                --min_scale=0.5  \\\n",
    "                                --max_scale=2.0  \\\n",
    "                                --ignore_label=255  \\\n",
    "                                --num_classes=2  \\\n",
    "                                --model=DeepLabV3plus_s16  \\\n",
    "                                --ckpt_pre_trained='resnet101-5d3b4d8f.ckpt'  \\\n",
    "                                --is_distributed  \\\n",
    "                                --save_steps=410  \\\n",
    "                                --keep_checkpoint_max=200"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "[WARNING] ME(14869:281473128734304,MainProcess):2021-08-23-16:34:06.793.435 [mindspore/train/serialization.py:442] 566 parameters in the net are not loaded.\n",
      "[WARNING] DEVICE(14869,python):2021-08-23-16:34:48.735.192 [mindspore/ccsrc/runtime/device/ascend/kernel_select_ascend.cc:282] TagRaiseReduce] node:[DropoutGenMask]reduce precision from int64 to int32\n",
      "[WARNING] SESSION(14869,python):2021-08-23-16:34:52.058.284 [mindspore/ccsrc/backend/session/ascend_session.cc:1396] SelectKernel] There are 117 node/nodes used raise precision to selected the kernel!\n",
      "[WARNING] SESSION(14869,python):2021-08-23-16:34:52.058.335 [mindspore/ccsrc/backend/session/ascend_session.cc:1400] SelectKernel] There are 1 node/nodes used reduce precision to selected the kernel!\n",
      "epoch: 1 step: 562, loss is 0.42591965\n",
      "epoch time: 224198.625 ms, per step time: 398.930 ms\n",
      "epoch: 2 step: 562, loss is 0.3811133\n",
      "epoch time: 53667.018 ms, per step time: 95.493 ms\n",
      "epoch: 3 step: 562, loss is 0.34957543\n",
      "epoch time: 53484.149 ms, per step time: 95.168 ms\n",
      "epoch: 4 step: 562, loss is 0.33503178\n",
      "epoch time: 53527.766 ms, per step time: 95.245 ms\n",
      "epoch: 5 step: 562, loss is 0.31652313\n",
      "epoch time: 52315.016 ms, per step time: 93.087 ms\n",
      "epoch: 6 step: 562, loss is 0.3301006\n",
      "epoch time: 53747.622 ms, per step time: 95.636 ms\n",
      "epoch: 7 step: 562, loss is 0.34960142\n",
      "epoch time: 52556.844 ms, per step time: 93.518 ms\n",
      "epoch: 8 step: 562, loss is 0.23465896\n",
      "epoch time: 54079.817 ms, per step time: 96.227 ms\n",
      "epoch: 9 step: 562, loss is 0.33514664\n",
      "epoch time: 52931.757 ms, per step time: 94.185 ms\n",
      "epoch: 10 step: 562, loss is 0.27784622\n",
      "epoch time: 54146.508 ms, per step time: 96.346 ms\n",
      "epoch: 11 step: 562, loss is 0.27906287\n",
      "epoch time: 54401.318 ms, per step time: 96.799 ms\n",
      "epoch: 12 step: 562, loss is 0.3693288\n",
      "epoch time: 53147.338 ms, per step time: 94.568 ms\n",
      "epoch: 13 step: 562, loss is 0.26306337\n",
      "epoch time: 53912.630 ms, per step time: 95.930 ms\n",
      "epoch: 14 step: 562, loss is 0.30507398\n",
      "epoch time: 52178.685 ms, per step time: 92.845 ms\n",
      "epoch: 15 step: 562, loss is 0.30293453\n",
      "epoch time: 52355.912 ms, per step time: 93.160 ms\n",
      "epoch: 16 step: 562, loss is 0.25257698\n",
      "epoch time: 53405.455 ms, per step time: 95.028 ms\n",
      "epoch: 17 step: 562, loss is 0.24839163\n",
      "epoch time: 53496.323 ms, per step time: 95.189 ms\n",
      "epoch: 18 step: 562, loss is 0.2100479\n",
      "epoch time: 51779.506 ms, per step time: 92.134 ms\n",
      "epoch: 19 step: 562, loss is 0.20946473\n",
      "epoch time: 53513.734 ms, per step time: 95.220 ms\n",
      "epoch: 20 step: 562, loss is 0.2679774\n",
      "epoch time: 53461.452 ms, per step time: 95.127 ms\n",
      "epoch: 21 step: 562, loss is 0.33935717\n",
      "epoch time: 53249.615 ms, per step time: 94.750 ms\n",
      "epoch: 22 step: 562, loss is 0.25679147\n",
      "epoch time: 53128.790 ms, per step time: 94.535 ms\n",
      "epoch: 23 step: 562, loss is 0.24767758\n",
      "epoch time: 54473.343 ms, per step time: 96.928 ms\n",
      "epoch: 24 step: 562, loss is 0.32685533\n",
      "epoch time: 53692.674 ms, per step time: 95.539 ms\n",
      "epoch: 25 step: 562, loss is 0.33594847\n",
      "epoch time: 53825.742 ms, per step time: 95.775 ms\n",
      "epoch: 26 step: 562, loss is 0.26285186\n",
      "epoch time: 52686.269 ms, per step time: 93.748 ms\n",
      "epoch: 27 step: 562, loss is 0.2713329\n",
      "epoch time: 54138.649 ms, per step time: 96.332 ms\n",
      "epoch: 28 step: 562, loss is 0.3673352\n",
      "epoch time: 52240.170 ms, per step time: 92.954 ms\n",
      "epoch: 29 step: 562, loss is 0.27099136\n",
      "epoch time: 53620.521 ms, per step time: 95.410 ms\n",
      "epoch: 30 step: 562, loss is 0.27835378\n",
      "epoch time: 52609.385 ms, per step time: 93.611 ms\n",
      "epoch: 31 step: 562, loss is 0.32528996\n",
      "epoch time: 53852.568 ms, per step time: 95.823 ms\n",
      "epoch: 32 step: 562, loss is 0.20690162\n",
      "epoch time: 53041.335 ms, per step time: 94.380 ms\n",
      "epoch: 33 step: 562, loss is 0.22032708\n",
      "epoch time: 53812.724 ms, per step time: 95.752 ms\n",
      "epoch: 34 step: 562, loss is 0.26088688\n",
      "epoch time: 53008.613 ms, per step time: 94.321 ms\n",
      "epoch: 35 step: 562, loss is 0.18645518\n",
      "epoch time: 51745.301 ms, per step time: 92.073 ms\n",
      "epoch: 36 step: 562, loss is 0.23079921\n",
      "epoch time: 52540.383 ms, per step time: 93.488 ms\n",
      "epoch: 37 step: 562, loss is 0.30234945\n",
      "epoch time: 53070.924 ms, per step time: 94.432 ms\n",
      "epoch: 38 step: 562, loss is 0.22891478\n",
      "epoch time: 53712.253 ms, per step time: 95.573 ms\n",
      "epoch: 39 step: 562, loss is 0.41451395\n",
      "epoch time: 51823.190 ms, per step time: 92.212 ms\n",
      "epoch: 40 step: 562, loss is 0.24765491\n",
      "epoch time: 53093.313 ms, per step time: 94.472 ms\n",
      "epoch: 41 step: 562, loss is 0.24162261\n",
      "epoch time: 53115.198 ms, per step time: 94.511 ms\n",
      "epoch: 42 step: 562, loss is 0.25284204\n",
      "epoch time: 52325.694 ms, per step time: 93.106 ms\n",
      "epoch: 43 step: 562, loss is 0.26474008\n",
      "epoch time: 53343.928 ms, per step time: 94.918 ms\n",
      "epoch: 44 step: 562, loss is 0.17949194\n",
      "epoch time: 52063.498 ms, per step time: 92.640 ms\n",
      "epoch: 45 step: 562, loss is 0.26451832\n",
      "epoch time: 52597.290 ms, per step time: 93.589 ms\n",
      "epoch: 46 step: 562, loss is 0.22577551\n",
      "epoch time: 53421.691 ms, per step time: 95.056 ms\n",
      "epoch: 47 step: 562, loss is 0.22373937\n",
      "epoch time: 53547.300 ms, per step time: 95.280 ms\n",
      "epoch: 48 step: 562, loss is 0.2629213\n",
      "epoch time: 52678.056 ms, per step time: 93.733 ms\n",
      "epoch: 49 step: 562, loss is 0.34563103\n",
      "epoch time: 52518.655 ms, per step time: 93.450 ms\n",
      "epoch: 50 step: 562, loss is 0.28244188\n",
      "epoch time: 52569.788 ms, per step time: 93.541 ms\n",
      "epoch: 51 step: 562, loss is 0.24224278\n",
      "epoch time: 52680.245 ms, per step time: 93.737 ms\n",
      "epoch: 52 step: 562, loss is 0.25028247\n",
      "epoch time: 53883.881 ms, per step time: 95.879 ms\n",
      "epoch: 53 step: 562, loss is 0.29212382\n",
      "epoch time: 52072.109 ms, per step time: 92.655 ms\n",
      "epoch: 54 step: 562, loss is 0.35162798\n",
      "epoch time: 53334.037 ms, per step time: 94.900 ms\n",
      "epoch: 55 step: 562, loss is 0.27180514\n",
      "epoch time: 52512.176 ms, per step time: 93.438 ms\n",
      "epoch: 56 step: 562, loss is 0.25452653\n",
      "epoch time: 51639.776 ms, per step time: 91.886 ms\n",
      "epoch: 57 step: 562, loss is 0.16884091\n",
      "epoch time: 52711.213 ms, per step time: 93.792 ms\n",
      "epoch: 58 step: 562, loss is 0.18407814\n",
      "epoch time: 53026.780 ms, per step time: 94.354 ms\n",
      "epoch: 59 step: 562, loss is 0.24484621\n",
      "epoch time: 54153.906 ms, per step time: 96.359 ms\n",
      "epoch: 60 step: 562, loss is 0.2653444\n",
      "epoch time: 52157.491 ms, per step time: 92.807 ms\n",
      "epoch: 61 step: 562, loss is 0.19639185\n",
      "epoch time: 53033.723 ms, per step time: 94.366 ms\n",
      "epoch: 62 step: 562, loss is 0.22915696\n",
      "epoch time: 53562.234 ms, per step time: 95.306 ms\n",
      "epoch: 63 step: 562, loss is 0.24921855\n",
      "epoch time: 52462.931 ms, per step time: 93.350 ms\n",
      "epoch: 64 step: 562, loss is 0.25141737\n",
      "epoch time: 53669.848 ms, per step time: 95.498 ms\n",
      "epoch: 65 step: 562, loss is 0.14625068\n",
      "epoch time: 52779.932 ms, per step time: 93.914 ms\n",
      "epoch: 66 step: 562, loss is 0.30439532\n",
      "epoch time: 53532.917 ms, per step time: 95.254 ms\n",
      "epoch: 67 step: 562, loss is 0.34872335\n",
      "epoch time: 53399.830 ms, per step time: 95.017 ms\n",
      "epoch: 68 step: 562, loss is 0.22534245\n",
      "epoch time: 53237.675 ms, per step time: 94.729 ms\n",
      "epoch: 69 step: 562, loss is 0.22056437\n",
      "epoch time: 53982.691 ms, per step time: 96.055 ms\n",
      "epoch: 70 step: 562, loss is 0.27974263\n",
      "epoch time: 52842.037 ms, per step time: 94.025 ms\n",
      "epoch: 71 step: 562, loss is 0.24798425\n",
      "epoch time: 52444.856 ms, per step time: 93.318 ms\n",
      "epoch: 72 step: 562, loss is 0.31831396\n",
      "epoch time: 53239.385 ms, per step time: 94.732 ms\n",
      "epoch: 73 step: 562, loss is 0.27447766\n",
      "epoch time: 52928.949 ms, per step time: 94.180 ms\n",
      "epoch: 74 step: 562, loss is 0.28237343\n",
      "epoch time: 53256.069 ms, per step time: 94.762 ms\n",
      "epoch: 75 step: 562, loss is 0.26107782\n",
      "epoch time: 52515.326 ms, per step time: 93.444 ms\n",
      "epoch: 76 step: 562, loss is 0.37857202\n",
      "epoch time: 52689.494 ms, per step time: 93.754 ms\n",
      "epoch: 77 step: 562, loss is 0.23611948\n",
      "epoch time: 51685.271 ms, per step time: 91.967 ms\n",
      "epoch: 78 step: 562, loss is 0.22944261\n",
      "epoch time: 52601.629 ms, per step time: 93.597 ms\n",
      "epoch: 79 step: 562, loss is 0.29882035\n",
      "epoch time: 53056.706 ms, per step time: 94.407 ms\n",
      "epoch: 80 step: 562, loss is 0.23418707\n",
      "epoch time: 51331.381 ms, per step time: 91.337 ms\n",
      "epoch: 81 step: 562, loss is 0.22800633\n",
      "epoch time: 54081.460 ms, per step time: 96.230 ms\n",
      "epoch: 82 step: 562, loss is 0.22373256\n",
      "epoch time: 52282.315 ms, per step time: 93.029 ms\n",
      "epoch: 83 step: 562, loss is 0.30237824\n",
      "epoch time: 52504.044 ms, per step time: 93.424 ms\n",
      "epoch: 84 step: 562, loss is 0.28990352\n",
      "epoch time: 52855.867 ms, per step time: 94.050 ms\n",
      "epoch: 85 step: 562, loss is 0.21391529\n",
      "epoch time: 53238.271 ms, per step time: 94.730 ms\n",
      "epoch: 86 step: 562, loss is 0.2405097\n",
      "epoch time: 52557.772 ms, per step time: 93.519 ms\n",
      "epoch: 87 step: 562, loss is 0.21648239\n",
      "epoch time: 53138.360 ms, per step time: 94.552 ms\n",
      "epoch: 88 step: 562, loss is 0.28499347\n",
      "epoch time: 52484.277 ms, per step time: 93.388 ms\n",
      "epoch: 89 step: 562, loss is 0.30493435\n",
      "epoch time: 52143.687 ms, per step time: 92.782 ms\n",
      "epoch: 90 step: 562, loss is 0.20544618\n",
      "epoch time: 52449.592 ms, per step time: 93.327 ms\n",
      "epoch: 91 step: 562, loss is 0.2305917\n",
      "epoch time: 52382.849 ms, per step time: 93.208 ms\n",
      "epoch: 92 step: 562, loss is 0.24197742\n",
      "epoch time: 53620.837 ms, per step time: 95.411 ms\n",
      "epoch: 93 step: 562, loss is 0.18494222\n",
      "epoch time: 53422.933 ms, per step time: 95.059 ms\n",
      "epoch: 94 step: 562, loss is 0.40015584\n",
      "epoch time: 52894.859 ms, per step time: 94.119 ms\n",
      "epoch: 95 step: 562, loss is 0.26687893\n",
      "epoch time: 52389.086 ms, per step time: 93.219 ms\n",
      "epoch: 96 step: 562, loss is 0.22774383\n",
      "epoch time: 53494.685 ms, per step time: 95.186 ms\n",
      "epoch: 97 step: 562, loss is 0.25207716\n",
      "epoch time: 54250.558 ms, per step time: 96.531 ms\n",
      "epoch: 98 step: 562, loss is 0.26396844\n",
      "epoch time: 52825.318 ms, per step time: 93.995 ms\n",
      "epoch: 99 step: 562, loss is 0.20879816\n",
      "epoch time: 53231.088 ms, per step time: 94.717 ms\n",
      "epoch: 100 step: 562, loss is 0.21062519\n",
      "epoch time: 52449.559 ms, per step time: 93.327 ms\n",
      "epoch: 101 step: 562, loss is 0.28307787\n",
      "[ERROR] ME(14869:281473128734304,MainProcess):2021-08-23-18:06:13.697.542 [mindspore/train/serialization.py:148] Failed to save the checkpoint file /home/ma-user/work/deeplabv3/ckpt/DeepLabV3plus_s16-101_562.ckpt.\n",
      "Traceback (most recent call last):\n",
      "  File \"train.py\", line 214, in <module>\n",
      "    train()\n",
      "  File \"train.py\", line 205, in train\n",
      "    model.train(args.train_epochs, dataset, callbacks=cbs, dataset_sink_mode=(args.device_target != \"CPU\"))\n",
      "  File \"/home/ma-user/miniconda3/envs/MindSpore-python3.7-aarch64/lib/python3.7/site-packages/mindspore/train/model.py\", line 627, in train\n",
      "    sink_size=sink_size)\n",
      "  File \"/home/ma-user/miniconda3/envs/MindSpore-python3.7-aarch64/lib/python3.7/site-packages/mindspore/train/model.py\", line 413, in _train\n",
      "    self._train_dataset_sink_process(epoch, train_dataset, list_callback, cb_params, sink_size)\n",
      "  File \"/home/ma-user/miniconda3/envs/MindSpore-python3.7-aarch64/lib/python3.7/site-packages/mindspore/train/model.py\", line 481, in _train_dataset_sink_process\n",
      "    list_callback.step_end(run_context)\n",
      "  File \"/home/ma-user/miniconda3/envs/MindSpore-python3.7-aarch64/lib/python3.7/site-packages/mindspore/train/callback/_callback.py\", line 210, in step_end\n",
      "    cb.step_end(run_context)\n",
      "  File \"/home/ma-user/miniconda3/envs/MindSpore-python3.7-aarch64/lib/python3.7/site-packages/mindspore/train/callback/_checkpoint.py\", line 294, in step_end\n",
      "    self._save_ckpt(cb_params)\n",
      "  File \"/home/ma-user/miniconda3/envs/MindSpore-python3.7-aarch64/lib/python3.7/site-packages/mindspore/train/callback/_checkpoint.py\", line 370, in _save_ckpt\n",
      "    self._config.async_save)\n",
      "  File \"/home/ma-user/miniconda3/envs/MindSpore-python3.7-aarch64/lib/python3.7/site-packages/mindspore/train/serialization.py\", line 219, in save_checkpoint\n",
      "    _exec_save(ckpt_file_name, data_list)\n",
      "  File \"/home/ma-user/miniconda3/envs/MindSpore-python3.7-aarch64/lib/python3.7/site-packages/mindspore/train/serialization.py\", line 149, in _exec_save\n",
      "    raise e\n",
      "  File \"/home/ma-user/miniconda3/envs/MindSpore-python3.7-aarch64/lib/python3.7/site-packages/mindspore/train/serialization.py\", line 143, in _exec_save\n",
      "    f.write(checkpoint_list.SerializeToString())\n",
      "OSError: [Errno 122] Disk quota exceeded\n",
      "\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "source": [
    "!python eval.py --data_root='/home/ma-user/work/data'  \\\n",
    "                    --data_lst='/home/ma-user/work/data/eval.txt'  \\\n",
    "                    --batch_size=64  \\\n",
    "                    --crop_size=256  \\\n",
    "                    --ignore_label=255  \\\n",
    "                    --num_classes=2  \\\n",
    "                    --model=DeepLabV3plus_s16  \\\n",
    "                    --scales=1.0  \\\n",
    "                    --freeze_bn  \\\n",
    "                    --ckpt_path='/home/ma-user/work/deeplabv3/ckpt/DeepLabV3plus_s16-100_562.ckpt' \n"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "[WARNING] SESSION(9827,python):2021-08-24-04:52:36.809.441 [mindspore/ccsrc/backend/session/ascend_session.cc:1400] SelectKernel] There are 116 node/nodes used reduce precision to selected the kernel!\n",
      "[256]\n",
      "processed 64 images\n",
      "[256]\n",
      "processed 128 images\n",
      "[256]\n",
      "processed 192 images\n",
      "[256]\n",
      "processed 256 images\n",
      "[256]\n",
      "processed 320 images\n",
      "[256]\n",
      "processed 384 images\n",
      "[256]\n",
      "processed 448 images\n",
      "[256]\n",
      "processed 512 images\n",
      "[256]\n",
      "processed 576 images\n",
      "[256]\n",
      "processed 640 images\n",
      "[256]\n",
      "processed 704 images\n",
      "[256]\n",
      "processed 768 images\n",
      "[256]\n",
      "processed 832 images\n",
      "[256]\n",
      "processed 896 images\n",
      "[256]\n",
      "processed 960 images\n",
      "[256]\n",
      "processed 1024 images\n",
      "[256]\n",
      "processed 1088 images\n",
      "[256]\n",
      "processed 1152 images\n",
      "[256]\n",
      "processed 1216 images\n",
      "[256]\n",
      "processed 1280 images\n",
      "[256]\n",
      "processed 1344 images\n",
      "[256]\n",
      "processed 1408 images\n",
      "[256]\n",
      "processed 1472 images\n",
      "[256]\n",
      "processed 1536 images\n",
      "[256]\n",
      "processed 1600 images\n",
      "[256]\n",
      "processed 1664 images\n",
      "[256]\n",
      "processed 1728 images\n",
      "[256]\n",
      "processed 1792 images\n",
      "[256]\n",
      "processed 1856 images\n",
      "[256]\n",
      "processed 1920 images\n",
      "[256]\n",
      "processed 1984 images\n",
      "[256]\n",
      "processed 2001 images\n",
      "[[35348599.  4815718.]\n",
      " [ 8543580. 82429639.]]\n",
      "per-class IoU [0.72572624 0.86053402]\n",
      "mean IoU 0.7931301303857221\n",
      "\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 94,
   "source": [
    "import cv2\n",
    "import mindspore.common.dtype as mstype\n",
    "from mindspore import Tensor\n",
    "img = cv2.imread('/home/ma-user/work/data/labels/100_mask.png',cv2.IMREAD_GRAYSCALE)\n",
    "img = img*255\n",
    "cv2.imwrite('ori.png',img)\n",
    "print(img)\n",
    "# print(img/255)"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "[[255 255 255 ...   0   0   0]\n",
      " [255 255 255 ...   0   0   0]\n",
      " [255 255 255 ...   0   0   0]\n",
      " ...\n",
      " [255 255 255 ... 255 255 255]\n",
      " [255 255 255 ... 255 255 255]\n",
      " [255 255 255 ... 255 255 255]]\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "source": [
    "!python predict.py"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "[[[[0.04223541 0.03344936 0.02655814 ... 0.89696705 0.8755879\n",
      "    0.85058665]\n",
      "   [0.03173571 0.0262544  0.02170473 ... 0.91459197 0.89422554\n",
      "    0.86982375]\n",
      "   [0.02387584 0.02056432 0.01771636 ... 0.9295887  0.91052574\n",
      "    0.88677204]\n",
      "   ...\n",
      "   [0.02025684 0.0182608  0.01640427 ... 0.7158336  0.72482747\n",
      "    0.7337872 ]\n",
      "   [0.02323571 0.02021289 0.01758383 ... 0.7606314  0.7699248\n",
      "    0.77900344]\n",
      "   [0.02655814 0.02237604 0.01883403 ... 0.80039084 0.80956614\n",
      "    0.8184634 ]]\n",
      "\n",
      "  [[0.95776457 0.96655065 0.97344184 ... 0.10303295 0.12441214\n",
      "    0.14941335]\n",
      "   [0.9682643  0.9737456  0.97829527 ... 0.08540806 0.10577448\n",
      "    0.13017626]\n",
      "   [0.97612417 0.9794357  0.98228365 ... 0.07041135 0.08947427\n",
      "    0.11322797]\n",
      "   ...\n",
      "   [0.9797432  0.9817392  0.9835957  ... 0.28416637 0.27517253\n",
      "    0.26621282]\n",
      "   [0.97676426 0.9797871  0.98241615 ... 0.23936862 0.23007518\n",
      "    0.22099657]\n",
      "   [0.97344184 0.97762394 0.98116595 ... 0.19960919 0.19043383\n",
      "    0.18153661]]]]\n",
      "\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "source": [
    "from mindspore.train.serialization import load_checkpoint, load_param_into_net\n",
    "from src.deeplab_v3plus import DeepLabV3Plus\n",
    "import mindspore.nn as nn\n",
    "import os\n",
    "import sys\n",
    "import numpy as np\n",
    "import cv2\n",
    "from mindspore import Model, Tensor\n",
    "import mindspore.common.dtype as mstype\n",
    "import argparse\n",
    "def parse_args():\n",
    "    \"\"\"parse_args\"\"\"\n",
    "    parser = argparse.ArgumentParser('MindSpore DeepLabV3+ eval')\n",
    "\n",
    "    # val data\n",
    "    parser.add_argument('--data_root', type=str, default='', help='root path of val data')\n",
    "    parser.add_argument('--data_lst', type=str, default='', help='list of val data')\n",
    "    parser.add_argument('--batch_size', type=int, default=1, help='batch size')\n",
    "    parser.add_argument('--crop_size', type=int, default=256, help='crop size')\n",
    "    parser.add_argument('--image_mean', type=list, default=[103.53, 116.28, 123.675], help='image mean')\n",
    "    parser.add_argument('--image_std', type=list, default=[57.375, 57.120, 58.395], help='image std')\n",
    "    parser.add_argument('--scales', type=float, action='append', help='scales of evaluation')\n",
    "    parser.add_argument('--flip', action='store_true', help='perform left-right flip')\n",
    "    parser.add_argument('--ignore_label', type=int, default=255, help='ignore label')\n",
    "    parser.add_argument('--num_classes', type=int, default=2, help='number of classes')\n",
    "\n",
    "    # model\n",
    "    parser.add_argument('--model', type=str, default='', help='select model')\n",
    "    parser.add_argument('--freeze_bn', action='store_true', default=False, help='freeze bn')\n",
    "    parser.add_argument('--ckpt_path', type=str, default='', help='model to evaluate')\n",
    "\n",
    "    args, _ = parser.parse_known_args()\n",
    "    return args\n",
    "class BuildEvalNetwork(nn.Cell):\n",
    "    def __init__(self, network):\n",
    "        super(BuildEvalNetwork, self).__init__()\n",
    "        self.network = network\n",
    "        self.softmax = nn.Softmax(axis=1)\n",
    "\n",
    "    def construct(self, input_data):\n",
    "        output = self.network(input_data)\n",
    "        output = self.softmax(output)\n",
    "        return output\n",
    "def cal_hist(a, b, n):\n",
    "    k = (a >= 0) & (a < n)\n",
    "    return np.bincount(n * a[k].astype(np.int32) + b[k], minlength=n ** 2).reshape(n, n)\n",
    "\n",
    "\n",
    "def resize_long(img, long_size=256):\n",
    "    # Scale img (HWC) so its LONGER side equals long_size, keeping the\n",
    "    # aspect ratio; the shorter side is truncated to an int.\n",
    "    h, w, _ = img.shape\n",
    "    if h > w:\n",
    "        new_h = long_size\n",
    "        new_w = int(1.0 * long_size * w / h)\n",
    "    else:\n",
    "        new_w = long_size\n",
    "        new_h = int(1.0 * long_size * h / w)\n",
    "    imo = cv2.resize(img, (new_w, new_h))\n",
    "    return imo\n",
    "\n",
    "def pre_process(args, img_, crop_size=256):\n",
    "    \"\"\"Resize (long side = crop_size), normalize with args.image_mean /\n",
    "    args.image_std, zero-pad bottom/right up to crop_size x crop_size,\n",
    "    and transpose HWC -> CHW. Returns (img_chw, resize_h, resize_w).\"\"\"\n",
    "    # resize\n",
    "    img_ = resize_long(img_, crop_size)\n",
    "    resize_h, resize_w, _ = img_.shape\n",
    "\n",
    "    # mean, std\n",
    "    image_mean = np.array(args.image_mean)\n",
    "    image_std = np.array(args.image_std)\n",
    "    img_ = (img_ - image_mean) / image_std\n",
    "\n",
    "    # pad to crop_size\n",
    "    pad_h = crop_size - img_.shape[0]\n",
    "    pad_w = crop_size - img_.shape[1]\n",
    "    if pad_h > 0 or pad_w > 0:\n",
    "        img_ = cv2.copyMakeBorder(img_, 0, pad_h, 0, pad_w, cv2.BORDER_CONSTANT, value=0)\n",
    "\n",
    "    # hwc to chw\n",
    "    img_ = img_.transpose((2, 0, 1))\n",
    "    return img_, resize_h, resize_w\n",
    "\n"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "source": [
    "args = parse_args()"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "source": [
    "network = DeepLabV3Plus('eval', 2, 16, False)"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "source": [
    "eval_net = BuildEvalNetwork(network)"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "source": [
    "param_dict = load_checkpoint('/home/ma-user/work/deeplabv3/ckpt/DeepLabV3plus_s16-100_562.ckpt')"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "source": [
    "load_param_into_net(eval_net, param_dict)"
   ],
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": [
       "[]"
      ]
     },
     "metadata": {},
     "execution_count": 14
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [
    "eval_net.set_train(False)"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "source": [
    "batch_img = np.zeros((args.batch_size, 3, 256, 256), dtype=np.float32)"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "source": [
    "img_ = cv2.imread('/home/ma-user/work/data/images/100.png')"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "source": [
    "img_, resize_h, resize_w = pre_process(args, img_, args.crop_size)"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "source": [
    "batch_img[0] = img_"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "source": [
    "batch_img = np.ascontiguousarray(batch_img)"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "source": [
    "net_out = eval_net(Tensor(batch_img, mstype.float32))"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [
    "# Convert the output Tensor to a numpy array. The hasattr guard makes\n",
    "# the cell idempotent: re-running it previously raised AttributeError\n",
    "# because net_out had already been rebound to an ndarray.\n",
    "if hasattr(net_out, 'asnumpy'):\n",
    "    net_out = net_out.asnumpy()"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 88,
   "source": [
    "print(net_out[0][1])"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "[[0.95776457 0.96655065 0.97344184 ... 0.10322934 0.12450572 0.14950167]\n",
      " [0.9682643  0.9737456  0.97829527 ... 0.08540806 0.10577448 0.1302686 ]\n",
      " [0.97612417 0.9794357  0.98228365 ... 0.07056955 0.08962605 0.11322797]\n",
      " ...\n",
      " [0.9797432  0.9817392  0.9835957  ... 0.28416637 0.27517253 0.26621282]\n",
      " [0.97676426 0.9797871  0.98241615 ... 0.23936862 0.22993043 0.22099657]\n",
      " [0.97344184 0.97762394 0.98116595 ... 0.19960919 0.19043383 0.18153661]]\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 89,
   "source": [
    "ret,thresh1 = cv2.threshold(net_out[0][1],0.5,255,cv2.THRESH_BINARY)"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 91,
   "source": [
    "print(thresh1)"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "[[255. 255. 255. ...   0.   0.   0.]\n",
      " [255. 255. 255. ...   0.   0.   0.]\n",
      " [255. 255. 255. ...   0.   0.   0.]\n",
      " ...\n",
      " [255. 255. 255. ...   0.   0.   0.]\n",
      " [255. 255. 255. ...   0.   0.   0.]\n",
      " [255. 255. 255. ...   0.   0.   0.]]\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 92,
   "source": [
    "cv2.imwrite('out.png',thresh1)"
   ],
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": [
       "True"
      ]
     },
     "metadata": {},
     "execution_count": 92
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [
    "# mox.file.copy_parallel('deeplabv3','obs://lawther-bisai/all/deeplabv3')\n",
    "# mox.file.copy_parallel('data','obs://lawther-bisai/all/data')\n",
    "mox.file.copy_parallel('base-line.ipynb','obs://lawther-bisai/all/')"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [
    "cp -r data/ remote-sensing-deep-lab/"
   ],
   "outputs": [],
   "metadata": {}
  }
 ],
 "metadata": {
  "kernelspec": {
   "name": "python3",
   "display_name": "Python 3.8.2 64-bit"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.2"
  },
  "interpreter": {
   "hash": "564b45e5ed683fb526fd2b4643029068fcf9702b88397529be454a97635db8ab"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}