{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "26f13818-427a-4cea-bfc5-d08b801c6221",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "mindrecord_file /home/ma-user/work/ICNet/data/cityscapes_mindrecord/cityscapes-2975.mindrecord\n",
      "cfg['train']['train_batch_size_percard'] 4\n",
      "data_size 1\n",
      "pretrained....\n",
      "train begins------------------------------\n",
      "epoch: 1 step: 1, loss is 5.391666412353516\n",
      "Train epoch time: 62726.368 ms, per step time: 62726.368 ms\n",
      "epoch: 2 step: 1, loss is 5.209846496582031\n",
      "Train epoch time: 54.050 ms, per step time: 54.050 ms\n",
      "epoch: 3 step: 1, loss is 4.969461917877197\n",
      "Train epoch time: 52.545 ms, per step time: 52.545 ms\n",
      "epoch: 4 step: 1, loss is 4.363028049468994\n",
      "Train epoch time: 51.385 ms, per step time: 51.385 ms\n",
      "epoch: 5 step: 1, loss is 4.58790397644043\n",
      "Train epoch time: 3581.642 ms, per step time: 3581.642 ms\n",
      "epoch: 6 step: 1, loss is 4.636248588562012\n",
      "Train epoch time: 56.935 ms, per step time: 56.935 ms\n",
      "epoch: 7 step: 1, loss is 3.948723793029785\n",
      "Train epoch time: 51.524 ms, per step time: 51.524 ms\n",
      "epoch: 8 step: 1, loss is 3.7971291542053223\n",
      "Train epoch time: 51.049 ms, per step time: 51.049 ms\n",
      "epoch: 9 step: 1, loss is 4.240333557128906\n",
      "Train epoch time: 56.195 ms, per step time: 56.195 ms\n",
      "epoch: 10 step: 1, loss is 3.3314199447631836\n",
      "Train epoch time: 3799.864 ms, per step time: 3799.864 ms\n"
     ]
    }
   ],
   "source": []
  },
  {
   "cell_type": "raw",
   "id": "454881ae-f68b-49fc-960a-9ae62f34a738",
   "metadata": {},
   "source": [
    "环境准备与数据读取\n",
    "本案例基于MindSpore-CPU版本实现，在CPU上完成模型训练。\n",
    "\n",
    "案例实现所使用的数据:Cityscape Dataset Website\n",
    "\n",
    "为了下载数据集，我们首先需要在Cityscapes数据集官网进行注册，并且最好使用edu教育邮箱进行注册，此后等待几天，就可以下载数据集了，这里我们下载了两个文件：gtFine_trainvaltest.zip和leftImg8bit_trainvaltest.zip (11GB)。\n",
    "\n",
    "下载完成后，我们对数据集压缩文件进行解压，文件的目录结构如下所示。\n",
    "\n",
    "由于我们是在CPU上跑得，原本数据集有1个多G，全部拿来跑得话，很容易掉卡，故我们就选择一个城市的一些图片完成。"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3bc8a45c-bcae-43c0-b7e0-13892f041638",
   "metadata": {},
   "source": [
    "首先要处理数据，生成对应的.mindrecord 和 .mindrecord.db文件"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f3b78ac5-3c0d-4cac-83c3-00554f9ba6ec",
   "metadata": {},
   "source": [
    "需要注意的是，在生成这两个文件之前，我们要建立一个文件夹，用cityscapes_mindrecord命名，放在cityscapes文件夹的同级目录下：\n",
    "而且要保持cityscapes_mindrecord文件夹里面为空"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2e041700-a21c-4fb4-84bf-341102631b39",
   "metadata": {},
   "source": [
    "下面是构建数据集的代码：\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "f7a4a99a-3cf1-4456-848b-b92175cca29c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Found 1 images in the folder /home/ma-user/work/ICNet/data/cityscapes/leftImg8bit/train\n"
     ]
    }
   ],
   "source": [
    "\n",
    "\"\"\"Prepare Cityscapes dataset\"\"\"\n",
    "import os\n",
    "import random\n",
    "import argparse\n",
    "import numpy as np\n",
    "from PIL import Image\n",
    "from PIL import ImageOps\n",
    "from PIL import ImageFilter\n",
    "import mindspore.dataset as de\n",
    "from mindspore.mindrecord import FileWriter\n",
    "import mindspore.dataset.vision as transforms\n",
    "import mindspore.dataset.transforms as tc\n",
    "\n",
    "\n",
    "def _get_city_pairs(folder, split='train'):\n",
    "    \"\"\"Collect index-aligned (image, mask) path lists for a Cityscapes split.\n",
    "\n",
    "    folder: dataset root containing leftImg8bit/ and gtFine/.\n",
    "    split:  'train' or 'val'; anything else raises ValueError instead of\n",
    "            silently returning None (which broke callers that unpack the result).\n",
    "    \"\"\"\n",
    "\n",
    "    def get_path_pairs(image_folder, masks_folder):\n",
    "        # Walk the image tree; for each png derive the gtFine label path\n",
    "        # and keep only pairs where both files exist on disk.\n",
    "        image_paths = []\n",
    "        masks_paths = []\n",
    "        for root, _, files in os.walk(image_folder):\n",
    "            for filename in files:\n",
    "                if filename.endswith('.png'):\n",
    "                    imgpath = os.path.join(root, filename)\n",
    "                    foldername = os.path.basename(os.path.dirname(imgpath))\n",
    "                    maskname = filename.replace('leftImg8bit', 'gtFine_labelIds')\n",
    "                    maskpath = os.path.join(masks_folder, foldername, maskname)\n",
    "                    if os.path.isfile(imgpath) and os.path.isfile(maskpath):\n",
    "                        image_paths.append(imgpath)\n",
    "                        masks_paths.append(maskpath)\n",
    "                    else:\n",
    "                        print('cannot find the mask or image:', imgpath, maskpath)\n",
    "        print('Found {} images in the folder {}'.format(len(image_paths), image_folder))\n",
    "        return image_paths, masks_paths\n",
    "\n",
    "    if split not in ('train', 'val'):\n",
    "        raise ValueError('split must be \"train\" or \"val\", got: {}'.format(split))\n",
    "    # \"<root>/leftImg8bit/<split>\" and \"<root>/gtFine/<split>\"\n",
    "    img_folder = os.path.join(folder, 'leftImg8bit/' + split)\n",
    "    mask_folder = os.path.join(folder, 'gtFine/' + split)\n",
    "    img_paths, mask_paths = get_path_pairs(img_folder, mask_folder)\n",
    "    return img_paths, mask_paths\n",
    "\n",
    "\n",
    "def _sync_transform(img, mask):\n",
    "    \"\"\"Jointly augment a PIL image/mask pair: flip, scale, pad, crop, blur.\n",
    "\n",
    "    Returns the result of _img_mask_transform (normalized float32 arrays).\n",
    "    \"\"\"\n",
    "    # (removed: an unused local random.Random(1234) instance and a\n",
    "    # no-op 'crop_size = crop_size' self-assignment)\n",
    "    base_size = 1024\n",
    "    crop_size = 960\n",
    "\n",
    "    # random horizontal mirror\n",
    "    if random.random() < 0.5:\n",
    "        img = img.transpose(Image.FLIP_LEFT_RIGHT)\n",
    "        mask = mask.transpose(Image.FLIP_LEFT_RIGHT)\n",
    "    # random scale: short edge resized into [0.5, 2.0] * base_size\n",
    "    short_size = random.randint(int(base_size * 0.5), int(base_size * 2.0))\n",
    "    w, h = img.size\n",
    "    if h > w:\n",
    "        ow = short_size\n",
    "        oh = int(1.0 * h * ow / w)\n",
    "    else:\n",
    "        oh = short_size\n",
    "        ow = int(1.0 * w * oh / h)\n",
    "    img = img.resize((ow, oh), Image.BILINEAR)\n",
    "    mask = mask.resize((ow, oh), Image.NEAREST)\n",
    "    # pad so both sides reach crop_size when the scaled image is too small\n",
    "    if short_size < crop_size:\n",
    "        padh = crop_size - oh if oh < crop_size else 0\n",
    "        padw = crop_size - ow if ow < crop_size else 0\n",
    "        img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)\n",
    "        mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)\n",
    "    # random crop of crop_size x crop_size\n",
    "    w, h = img.size\n",
    "    x1 = random.randint(0, w - crop_size)\n",
    "    y1 = random.randint(0, h - crop_size)\n",
    "    img = img.crop((x1, y1, x1 + crop_size, y1 + crop_size))\n",
    "    mask = mask.crop((x1, y1, x1 + crop_size, y1 + crop_size))\n",
    "    # gaussian blur as in PSPNet\n",
    "    if random.random() < 0.5:\n",
    "        img = img.filter(ImageFilter.GaussianBlur(radius=random.random()))\n",
    "    # final transform: normalization + class-index mapping\n",
    "    output = _img_mask_transform(img, mask)\n",
    "\n",
    "    return output\n",
    "\n",
    "\n",
    "def _class_to_index(mask):\n",
    "    \"\"\"Map raw Cityscapes label ids (-1..33) to train ids (0..18, -1 = ignore).\"\"\"\n",
    "    # Reference:\n",
    "    # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/helpers/labels.py\n",
    "    # _key[i] is the train id for raw label id (i - 1); -1 marks ignored classes.\n",
    "    _key = np.array([-1, -1, -1, -1, -1, -1,\n",
    "                     -1, -1, 0, 1, -1, -1,\n",
    "                     2, 3, 4, -1, -1, -1,\n",
    "                     5, -1, 6, 7, 8, 9,\n",
    "                     10, 11, 12, 13, 14, 15,\n",
    "                     -1, -1, 16, 17, 18])\n",
    "    # all valid raw label ids: [-1, ..., 33]\n",
    "    _mapping = np.array(range(-1, len(_key) - 1)).astype('int32')\n",
    "\n",
    "    # every pixel value must be a known raw label id\n",
    "    values = np.unique(mask)\n",
    "    for value in values:\n",
    "        assert value in _mapping\n",
    "    # position of each pixel value within _mapping\n",
    "    index = np.digitize(mask.ravel(), _mapping, right=True)\n",
    "    # look up _key at those positions to get the per-pixel train id\n",
    "    return _key[index].reshape(mask.shape)\n",
    "\n",
    "\n",
    "def _img_transform(img):\n",
    "    # PIL image -> numpy array (dtype inferred from the image)\n",
    "    return np.array(img)\n",
    "\n",
    "\n",
    "def _mask_transform(mask):\n",
    "    # PIL mask -> int32 train-id array via _class_to_index\n",
    "    target = _class_to_index(np.array(mask).astype('int32'))\n",
    "    return np.array(target).astype('int32')\n",
    "\n",
    "\n",
    "def _img_mask_transform(img, mask):\n",
    "    \"\"\"Convert a PIL image/mask pair into normalized float32 arrays.\"\"\"\n",
    "    # ToTensor scales to [0, 1] and moves channels first; Normalize applies\n",
    "    # the standard ImageNet mean/std (is_hwc=False because channels lead).\n",
    "    input_transform = tc.Compose([\n",
    "        transforms.ToTensor(),\n",
    "        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225), is_hwc=False)])\n",
    "    img = _img_transform(img)\n",
    "    mask = _mask_transform(mask)\n",
    "    img = input_transform(img)\n",
    "\n",
    "    img = np.array(img).astype(np.float32)\n",
    "    mask = np.array(mask).astype(np.float32)\n",
    "\n",
    "    return (img, mask)\n",
    "\n",
    "\n",
    "def data_to_mindrecord_img(prefix='cityscapes-2975.mindrecord', file_num=1,\n",
    "                           root='./', split='train', mindrecord_dir=\"./\"):\n",
    "    \"\"\"Serialize a Cityscapes split into MindRecord file(s).\n",
    "\n",
    "    prefix:         output file name.\n",
    "    file_num:       number of shard files FileWriter creates.\n",
    "    root:           Cityscapes dataset root.\n",
    "    split:          'train' or 'val'.\n",
    "    mindrecord_dir: destination directory (must already exist and be empty).\n",
    "    \"\"\"\n",
    "    mindrecord_path = os.path.join(mindrecord_dir, prefix)\n",
    "\n",
    "    writer = FileWriter(mindrecord_path, file_num)\n",
    "\n",
    "    img_paths, mask_paths = _get_city_pairs(root, split)\n",
    "\n",
    "    # fixed-size schema: full-resolution Cityscapes frames (1024 x 2048)\n",
    "    cityscapes_json = {\n",
    "        \"images\": {\"type\": \"int32\", \"shape\": [1024, 2048, 3]},\n",
    "        \"mask\": {\"type\": \"int32\", \"shape\": [1024, 2048]},\n",
    "    }\n",
    "\n",
    "    writer.add_schema(cityscapes_json, \"cityscapes_json\")\n",
    "\n",
    "    images_files_num = len(img_paths)\n",
    "    for index in range(images_files_num):\n",
    "        img = Image.open(img_paths[index]).convert('RGB')\n",
    "        img = np.array(img, dtype=np.int32)\n",
    "\n",
    "        mask = Image.open(mask_paths[index])\n",
    "        mask = np.array(mask, dtype=np.int32)\n",
    "\n",
    "        row = {\"images\": img, \"mask\": mask}\n",
    "        # progress report every 10 samples\n",
    "        if (index + 1) % 10 == 0:\n",
    "            print(\"writing {}/{} into mindrecord\".format(index + 1, images_files_num))\n",
    "        writer.write_raw_data([row])\n",
    "    writer.commit()\n",
    "\n",
    "\n",
    "def get_Image_crop_nor(img, mask):\n",
    "    \"\"\"Dataset map op: ndarray pair -> augmented, normalized (image, mask).\"\"\"\n",
    "    image = np.uint8(img)\n",
    "    mask = np.uint8(mask)\n",
    "    # back to PIL so _sync_transform can use PIL flip/resize/crop ops\n",
    "    image = Image.fromarray(image)\n",
    "    mask = Image.fromarray(mask)\n",
    "\n",
    "    output = _sync_transform(image, mask)\n",
    "\n",
    "    return output\n",
    "\n",
    "\n",
    "def create_icnet_dataset(mindrecord_file, batch_size=16, device_num=1, rank_id=0):\n",
    "    \"\"\"Build the training pipeline: MindRecord -> augmentation map -> batch.\n",
    "\n",
    "    device_num/rank_id shard the data for distributed training\n",
    "    (1/0 = single card). (Removed an unused local random.Random(1234).)\n",
    "    \"\"\"\n",
    "    ds = de.MindDataset(mindrecord_file, columns_list=[\"images\", \"mask\"],\n",
    "                        num_shards=device_num, shard_id=rank_id, shuffle=True)\n",
    "    # per-sample augmentation + normalization; renames columns to image/masks\n",
    "    ds = ds.map(operations=get_Image_crop_nor, input_columns=[\"images\", \"mask\"], output_columns=[\"image\", \"masks\"])\n",
    "\n",
    "    ds = ds.batch(batch_size=batch_size, drop_remainder=False)\n",
    "\n",
    "    return ds\n",
    "\n",
    "\n",
    "dataset_path=\"/home/ma-user/work/ICNet/data/cityscapes/\"\n",
    "mindrecord_path=\"/home/ma-user/work/ICNet/data/cityscapes_mindrecord/\"\n",
    "\n",
    "data_to_mindrecord_img(root=dataset_path, mindrecord_dir=mindrecord_path)\n",
    "# if __name__ == '__main__':\n",
    "#     parser = argparse.ArgumentParser(description=\"dataset_to_mindrecord\")\n",
    "#     parser.add_argument(\"--dataset_path\", type=str, default=\"/home/ma-user/work/ICNet/data/cityscapes/\", help=\"dataset path\")\n",
    "#     parser.add_argument(\"--mindrecord_path\", type=str, default=\"/home/ma-user/work/ICNet/data/cityscapes_mindrecord/\",\n",
    "#                         help=\"mindrecord_path\")\n",
    "\n",
    "#     args_opt = parser.parse_args()\n",
    "#     data_to_mindrecord_img(root=args_opt.dataset_path, mindrecord_dir=args_opt.mindrecord_path)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e7162273-104c-4d16-b45d-d73f54a0aaba",
   "metadata": {},
   "source": [
    "可以看到已经生成的对应的数据集文件，然后我们创建稍后用到的数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "14262c83-4f21-41f6-8f71-8b89bc01c543",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "mindrecord_file /home/ma-user/work/ICNet/data/cityscapes_mindrecord/cityscapes-2975.mindrecord\n",
      "<mindspore.dataset.engine.datasets.BatchDataset object at 0xffff48185b10>\n"
     ]
    }
   ],
   "source": [
    "prefix = 'cityscapes-2975.mindrecord'\n",
    "train_mindrecord_dir=\"/home/ma-user/work/ICNet/data/cityscapes_mindrecord\"\n",
    "train_train_batch_size_percard=4\n",
    "device_num=1\n",
    "rank_id=0\n",
    "\n",
    "# build the full path of the MindRecord file produced above\n",
    "mindrecord_dir = train_mindrecord_dir\n",
    "mindrecord_file = os.path.join(mindrecord_dir, prefix)\n",
    "print(\"mindrecord_file\",mindrecord_file)\n",
    "\n",
    "dataset = create_icnet_dataset(mindrecord_file, batch_size=train_train_batch_size_percard,\n",
    "                                   device_num=device_num, rank_id=rank_id)\n",
    "print(dataset)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8620f1f4-b14f-47d0-b9de-8ca15a72deff",
   "metadata": {},
   "source": [
    "# 建立需要训练模型的一些参数：(这里只是展示，不运行这里的代码，具体参数运行在后面)\n",
    "\n",
    "### 1.Model\n",
    "model:\n",
    "  name: \"icnet\"\n",
    "  backbone: \"resnet50v1\"\n",
    "  base_size: 1024    # during augmentation, shorter size will be resized between [base_size*0.5, base_size*2.0]\n",
    "  crop_size: 960     # end of augmentation, crop to training\n",
    "\n",
    "### 2.Optimizer\n",
    "optimizer:\n",
    "  init_lr: 0.02\n",
    "  momentum: 0.9\n",
    "  weight_decay: 0.0001\n",
    "\n",
    "### 3.Training\n",
    "train:\n",
    "  train_batch_size_percard: 4\n",
    "  valid_batch_size: 1\n",
    "  cityscapes_root: \"/data/cityscapes/\"\n",
    "  epochs: 10\n",
    "  val_epoch: 1        # run validation every val-epoch\n",
    "  ckpt_dir: \"./ckpt/\" # ckpt and training log will be saved here\n",
    "  mindrecord_dir: '/home/ma-user/work/ICNet/data/cityscapes_mindrecord'\n",
    "  pretrained_model_path: '/home/ma-user/work/ICNet/root/cacheckpt/resnet50-icnet-150_2.ckpt'\n",
    "  save_checkpoint_epochs: 5\n",
    "  keep_checkpoint_max: 10\n",
    "\n",
    "### 4.Valid\n",
    "test:\n",
    "  ckpt_path: \"\" # set the pretrained model path correctly"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "8d163a62-9632-4a1a-8f66-96fa22686fb4",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "data_size 1\n"
     ]
    }
   ],
   "source": [
    "train_epochs=10\n",
    "train_data_size = dataset.get_dataset_size()\n",
    "print(\"data_size\", train_data_size)\n",
    "epoch = train_epochs\n",
    "project_path=\"/home/ma-user/work/ICNet/\"\n",
    "train_pretrained_model_path=\"/home/ma-user/work/ICNet/root/cacheckpt/resnet50-icnet-150_2.ckpt\""
   ]
  },
  {
   "cell_type": "markdown",
   "id": "80b3bdb1-7618-4905-9719-7622f5e8d982",
   "metadata": {},
   "source": [
    "下面是模型构建：\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "ca519a64-6546-418a-b0e6-359e28c6f3ec",
   "metadata": {},
   "outputs": [],
   "source": [
    "import mindspore as ms\n",
    "import mindspore.nn as nn\n",
    "import mindspore.ops as ops\n",
    "from src.loss import ICNetLoss\n",
    "from src.models.resnet50_v1 import get_resnet50v1b\n",
    "\n",
    "__all__ = ['ICNetdc']\n",
    "\n",
    "class ICNetdc(nn.Cell):\n",
    "    \"\"\"Image Cascade Network wrapped with its training loss.\n",
    "\n",
    "    construct(x, y) returns the loss when istraining=True, otherwise the\n",
    "    raw multi-scale head outputs.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, nclass=19, pretrained_path=\"\", istraining=True, norm_layer=nn.SyncBatchNorm):\n",
    "        super(ICNetdc, self).__init__()\n",
    "        # sub1 branch: three stride-2 convs over the full-resolution input\n",
    "        self.conv_sub1 = nn.SequentialCell(\n",
    "            _ConvBNReLU(3, 32, 3, 2, norm_layer=norm_layer),\n",
    "            _ConvBNReLU(32, 32, 3, 2, norm_layer=norm_layer),\n",
    "            _ConvBNReLU(32, 64, 3, 2, norm_layer=norm_layer)\n",
    "        )\n",
    "        self.istraining = istraining\n",
    "        self.ppm = PyramidPoolingModule()\n",
    "\n",
    "        # shared ResNet50 backbone used by both the sub2 and sub4 branches\n",
    "        self.backbone = SegBaseModel(root=pretrained_path, istraining=istraining)\n",
    "\n",
    "        self.head = _ICHead(nclass, norm_layer=norm_layer)\n",
    "\n",
    "        self.loss = ICNetLoss()\n",
    "\n",
    "        self.resize_bilinear = nn.ResizeBilinear()\n",
    "\n",
    "        self.__setattr__('exclusive', ['conv_sub1', 'head'])\n",
    "\n",
    "    def construct(self, x, y):\n",
    "        \"\"\"Run the three-branch cascade; return loss (training) or logits.\"\"\"\n",
    "        if x.shape[0] != 1:\n",
    "            x = x.squeeze()\n",
    "        # sub 1\n",
    "        x_sub1 = self.conv_sub1(x)\n",
    "\n",
    "        h, w = x.shape[2:]\n",
    "        # sub 2: half-resolution input through the backbone\n",
    "        # NOTE(review): h / 2 is a float under Python 3 -- presumably\n",
    "        # nn.ResizeBilinear tolerates it, but h // 2 would be safer; confirm.\n",
    "        x_sub2 = self.resize_bilinear(x, size=(h / 2, w / 2))\n",
    "        _, x_sub2, _, _ = self.backbone(x_sub2)\n",
    "\n",
    "        # sub 4: deepest backbone features of the full input\n",
    "        _, _, _, x_sub4 = self.backbone(x)\n",
    "        # add PyramidPoolingModule context\n",
    "        x_sub4 = self.ppm(x_sub4)\n",
    "\n",
    "        output = self.head(x_sub1, x_sub2, x_sub4)\n",
    "\n",
    "        if self.istraining:\n",
    "            outputs = self.loss(output, y)\n",
    "        else:\n",
    "            outputs = output\n",
    "        return outputs\n",
    "\n",
    "class PyramidPoolingModule(nn.Cell):\n",
    "    \"\"\"PSPNet-style pyramid pooling: sum of upsampled multi-scale poolings.\"\"\"\n",
    "\n",
    "    def __init__(self, pyramids=None):\n",
    "        super(PyramidPoolingModule, self).__init__()\n",
    "        # NOTE(review): the pyramids argument is unused; scales are fixed below\n",
    "        self.avgpool = ops.ReduceMean(keep_dims=True)\n",
    "        self.pool2 = nn.AvgPool2d(kernel_size=15, stride=15)\n",
    "        self.pool3 = nn.AvgPool2d(kernel_size=10, stride=10)\n",
    "        self.pool6 = nn.AvgPool2d(kernel_size=5, stride=5)\n",
    "        self.resize_bilinear = nn.ResizeBilinear()\n",
    "\n",
    "    def construct(self, x):\n",
    "        \"\"\"Add global + three local average-pool contexts back onto x.\"\"\"\n",
    "        feat = x\n",
    "        height, width = x.shape[2:]\n",
    "\n",
    "        # global context: mean over the H and W axes, then upsample\n",
    "        x1 = self.avgpool(x, (2, 3))\n",
    "        x1 = self.resize_bilinear(x1, size=(height, width), align_corners=True)\n",
    "        feat = feat + x1\n",
    "\n",
    "        x2 = self.pool2(x)\n",
    "        x2 = self.resize_bilinear(x2, size=(height, width), align_corners=True)\n",
    "        feat = feat + x2\n",
    "\n",
    "        x3 = self.pool3(x)\n",
    "        x3 = self.resize_bilinear(x3, size=(height, width), align_corners=True)\n",
    "        feat = feat + x3\n",
    "\n",
    "        x6 = self.pool6(x)\n",
    "        x6 = self.resize_bilinear(x6, size=(height, width), align_corners=True)\n",
    "        feat = feat + x6\n",
    "\n",
    "        return feat\n",
    "\n",
    "\n",
    "class _ICHead(nn.Cell):\n",
    "    \"\"\"Cascade head: fuses sub4+sub2, then +sub1, and upsamples the logits.\n",
    "\n",
    "    construct returns [up_x8, up_x2, x_12_cls, x_24_cls]\n",
    "    (full-resolution logits first, then the auxiliary outputs).\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, nclass, norm_layer=nn.SyncBatchNorm, **kwargs):\n",
    "        super(_ICHead, self).__init__()\n",
    "        self.cff_12 = CascadeFeatureFusion12(128, 64, 128, nclass, norm_layer, **kwargs)\n",
    "        self.cff_24 = CascadeFeatureFusion24(2048, 512, 128, nclass, norm_layer, **kwargs)\n",
    "\n",
    "        self.conv_cls = nn.Conv2d(128, nclass, 1, has_bias=False)\n",
    "        self.resize_bilinear = nn.ResizeBilinear()\n",
    "\n",
    "    def construct(self, x_sub1, x_sub2, x_sub4):\n",
    "        \"\"\"Fuse the three branches and return the multi-scale logits.\"\"\"\n",
    "        # Bug fix: the original appended to a list stored on self, so the\n",
    "        # outputs of every forward pass accumulated across calls. Build a\n",
    "        # fresh list per call instead.\n",
    "        outputs = []\n",
    "        x_cff_24, x_24_cls = self.cff_24(x_sub4, x_sub2)\n",
    "\n",
    "        x_cff_12, x_12_cls = self.cff_12(x_cff_24, x_sub1)\n",
    "\n",
    "        h1, w1 = x_cff_12.shape[2:]\n",
    "        up_x2 = self.resize_bilinear(x_cff_12, size=(h1 * 2, w1 * 2),\n",
    "                                     align_corners=True)\n",
    "        up_x2 = self.conv_cls(up_x2)\n",
    "        h2, w2 = up_x2.shape[2:]\n",
    "\n",
    "        up_x8 = self.resize_bilinear(up_x2, size=(h2 * 4, w2 * 4),\n",
    "                                     align_corners=True)  # scale_factor=4,\n",
    "        outputs.append(up_x8)\n",
    "        outputs.append(up_x2)\n",
    "        outputs.append(x_12_cls)\n",
    "        outputs.append(x_24_cls)\n",
    "\n",
    "        return outputs\n",
    "\n",
    "\n",
    "class _ConvBNReLU(nn.Cell):\n",
    "    \"\"\"Conv2d (pad mode) -> norm_layer -> ReLU.\"\"\"\n",
    "\n",
    "    def __init__(self, in_channels, out_channels, kernel_size=3, stride=2, padding=1, dilation=1,\n",
    "                 groups=1, norm_layer=nn.SyncBatchNorm, bias=False, **kwargs):\n",
    "        super(_ConvBNReLU, self).__init__()\n",
    "        # Bug fix: the groups/bias parameters were accepted but ignored\n",
    "        # (group=1, has_bias=False were hard-coded). Defaults are unchanged,\n",
    "        # so existing callers behave identically.\n",
    "        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, pad_mode='pad', padding=padding,\n",
    "                              dilation=dilation,\n",
    "                              group=groups, has_bias=bias)\n",
    "        self.bn = norm_layer(out_channels, momentum=0.1)\n",
    "        self.relu = nn.ReLU()\n",
    "\n",
    "    def construct(self, x):\n",
    "        x = self.conv(x)\n",
    "        x = self.bn(x)\n",
    "        x = self.relu(x)\n",
    "        return x\n",
    "\n",
    "\n",
    "class CascadeFeatureFusion12(nn.Cell):\n",
    "    \"\"\"Cascade feature fusion between the sub1 and sub2 branch features.\n",
    "\n",
    "    construct returns (fused feature, low-branch logits for auxiliary loss).\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, low_channels, high_channels, out_channels, nclass, norm_layer=nn.SyncBatchNorm, **kwargs):\n",
    "        super(CascadeFeatureFusion12, self).__init__()\n",
    "        # dilated 3x3 conv on the (upsampled) low-resolution input\n",
    "        self.conv_low = nn.SequentialCell(\n",
    "            nn.Conv2d(low_channels, out_channels, 3, pad_mode='pad', padding=2, dilation=2, has_bias=False),\n",
    "            norm_layer(out_channels, momentum=0.1)\n",
    "        )\n",
    "        # 1x1 projection of the high-resolution input to out_channels\n",
    "        self.conv_high = nn.SequentialCell(\n",
    "            nn.Conv2d(high_channels, out_channels, kernel_size=1, has_bias=False),\n",
    "            norm_layer(out_channels, momentum=0.1)\n",
    "        )\n",
    "        self.conv_low_cls = nn.Conv2d(in_channels=out_channels, out_channels=nclass, kernel_size=1, has_bias=False)\n",
    "        self.resize_bilinear = nn.ResizeBilinear()\n",
    "\n",
    "        # NOTE(review): scalar_cast appears unused in construct\n",
    "        self.scalar_cast = ops.ScalarCast()\n",
    "\n",
    "        self.relu = ms.nn.ReLU()\n",
    "\n",
    "    def construct(self, x_low, x_high):\n",
    "        \"\"\"Upsample low to high's spatial size, project both, add, ReLU.\"\"\"\n",
    "        h, w = x_high.shape[2:]\n",
    "        x_low = self.resize_bilinear(x_low, size=(h, w), align_corners=True)\n",
    "        x_low = self.conv_low(x_low)\n",
    "\n",
    "        x_high = self.conv_high(x_high)\n",
    "        x = x_low + x_high\n",
    "\n",
    "        x = self.relu(x)\n",
    "        # auxiliary classifier runs on the low branch only\n",
    "        x_low_cls = self.conv_low_cls(x_low)\n",
    "\n",
    "        return x, x_low_cls\n",
    "\n",
    "\n",
    "class CascadeFeatureFusion24(nn.Cell):\n",
    "    \"\"\"Cascade feature fusion between the sub2 and sub4 branch features.\n",
    "\n",
    "    Same structure as CascadeFeatureFusion12 (minus the unused scalar_cast);\n",
    "    construct returns (fused feature, low-branch logits for auxiliary loss).\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, low_channels, high_channels, out_channels, nclass, norm_layer=nn.SyncBatchNorm, **kwargs):\n",
    "        super(CascadeFeatureFusion24, self).__init__()\n",
    "        # dilated 3x3 conv on the (upsampled) low-resolution input\n",
    "        self.conv_low = nn.SequentialCell(\n",
    "            nn.Conv2d(low_channels, out_channels, 3, pad_mode='pad', padding=2, dilation=2, has_bias=False),\n",
    "            norm_layer(out_channels, momentum=0.1)\n",
    "        )\n",
    "        # 1x1 projection of the high-resolution input to out_channels\n",
    "        self.conv_high = nn.SequentialCell(\n",
    "            nn.Conv2d(high_channels, out_channels, kernel_size=1, has_bias=False),\n",
    "            norm_layer(out_channels, momentum=0.1)\n",
    "        )\n",
    "        self.conv_low_cls = nn.Conv2d(in_channels=out_channels, out_channels=nclass, kernel_size=1, has_bias=False)\n",
    "\n",
    "        self.resize_bilinear = nn.ResizeBilinear()\n",
    "        self.relu = ms.nn.ReLU()\n",
    "\n",
    "    def construct(self, x_low, x_high):\n",
    "        \"\"\"Upsample low to high's spatial size, project both, add, ReLU.\"\"\"\n",
    "        h, w = x_high.shape[2:]\n",
    "\n",
    "        x_low = self.resize_bilinear(x_low, size=(h, w), align_corners=True)\n",
    "        x_low = self.conv_low(x_low)\n",
    "\n",
    "        x_high = self.conv_high(x_high)\n",
    "        x = x_low + x_high\n",
    "\n",
    "        x = self.relu(x)\n",
    "        # auxiliary classifier runs on the low branch only\n",
    "        x_low_cls = self.conv_low_cls(x_low)\n",
    "\n",
    "        return x, x_low_cls\n",
    "\n",
    "\n",
    "class SegBaseModel(nn.Cell):\n",
    "    \"\"\"ResNet50 feature extractor; returns the four stage feature maps.\"\"\"\n",
    "\n",
    "    def __init__(self, nclass=19, backbone='resnet50', root=\"\", istraining=False):\n",
    "        super(SegBaseModel, self).__init__()\n",
    "        self.nclass = nclass\n",
    "        # NOTE(review): self.pretrained is only created when backbone ==\n",
    "        # 'resnet50'; any other value leaves it undefined and construct fails.\n",
    "        if backbone == 'resnet50':\n",
    "            self.pretrained = get_resnet50v1b(ckpt_root=root, istraining=istraining)\n",
    "\n",
    "    def construct(self, x):\n",
    "        \"\"\"Forward through the pre-trained ResNet; return c1..c4 stage maps.\"\"\"\n",
    "        x = self.pretrained.conv1(x)\n",
    "        x = self.pretrained.bn1(x)\n",
    "        x = self.pretrained.relu(x)\n",
    "        x = self.pretrained.maxpool(x)\n",
    "        c1 = self.pretrained.layer1(x)\n",
    "        c2 = self.pretrained.layer2(c1)\n",
    "        c3 = self.pretrained.layer3(c2)\n",
    "        c4 = self.pretrained.layer4(c3)\n",
    "\n",
    "        return c1, c2, c3, c4\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "5515465a-cb85-4e49-8189-0e1827b5fcd2",
   "metadata": {},
   "outputs": [],
   "source": [
    "def poly_lr(base_lr, decay_steps, total_steps, end_lr=0.0001, power=0.9):\n",
    "    \"\"\"Yield a polynomial-decay learning rate for each of total_steps steps.\n",
    "\n",
    "    The rate decays from base_lr toward end_lr over decay_steps, then stays\n",
    "    at end_lr for any remaining steps.\n",
    "    \"\"\"\n",
    "    for i in range(total_steps):\n",
    "        step_ = min(i, decay_steps)  # clamp so lr is constant after decay_steps\n",
    "        yield (base_lr - end_lr) * ((1.0 - step_ / decay_steps) ** power) + end_lr"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "25bae900-3066-4433-b859-3f13e165367d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "pretrained....\n",
      "train begins------------------------------\n",
      "epoch: 1 step: 1, loss is 5.358231544494629\n",
      "Train epoch time: 39402.722 ms, per step time: 39402.722 ms\n",
      "epoch: 2 step: 1, loss is 5.21595573425293\n",
      "Train epoch time: 57.008 ms, per step time: 57.008 ms\n",
      "epoch: 3 step: 1, loss is 4.972453594207764\n",
      "Train epoch time: 54.783 ms, per step time: 54.783 ms\n",
      "epoch: 4 step: 1, loss is 4.489568710327148\n",
      "Train epoch time: 55.998 ms, per step time: 55.998 ms\n",
      "epoch: 5 step: 1, loss is 4.6170654296875\n",
      "Train epoch time: 3907.637 ms, per step time: 3907.637 ms\n",
      "epoch: 6 step: 1, loss is 3.888723611831665\n",
      "Train epoch time: 58.655 ms, per step time: 58.655 ms\n",
      "epoch: 7 step: 1, loss is 3.802926540374756\n",
      "Train epoch time: 58.026 ms, per step time: 58.026 ms\n",
      "epoch: 8 step: 1, loss is 3.526907444000244\n",
      "Train epoch time: 53.282 ms, per step time: 53.282 ms\n",
      "epoch: 9 step: 1, loss is 4.568917274475098\n",
      "Train epoch time: 53.067 ms, per step time: 53.067 ms\n",
      "epoch: 10 step: 1, loss is 4.348641872406006\n",
      "Train epoch time: 3804.689 ms, per step time: 3804.689 ms\n"
     ]
    }
   ],
   "source": [
    "# hyper-parameters and single-card device configuration\n",
    "optimizer_init_lr=0.02\n",
    "optimizer_weight_decay = 0.0001\n",
    "optimizer_momentum= 0.9\n",
    "train_save_checkpoint_epochs=5\n",
    "train_keep_checkpoint_max = 10\n",
    "rank_id = 0\n",
    "device_id = 0\n",
    "device_num =1\n",
    "\n",
    "# from src.lr_scheduler import poly_lr\n",
    "import os\n",
    "import sys\n",
    "import logging\n",
    "import argparse\n",
    "# import yaml\n",
    "import mindspore.nn as nn\n",
    "from mindspore import Model\n",
    "from mindspore import context\n",
    "from mindspore import set_seed\n",
    "from mindspore.context import ParallelMode\n",
    "from mindspore.communication import init\n",
    "from mindspore.train.callback import CheckpointConfig\n",
    "from mindspore.train.callback import ModelCheckpoint\n",
    "from mindspore.train.callback import LossMonitor\n",
    "from mindspore.train.callback import TimeMonitor\n",
    "\n",
    "# polynomial learning-rate schedule spanning the whole run\n",
    "iters_per_epoch = train_data_size\n",
    "total_train_steps = iters_per_epoch * epoch\n",
    "base_lr = optimizer_init_lr\n",
    "iter_lr = poly_lr(base_lr, total_train_steps, total_train_steps, end_lr=0.0, power=0.9)\n",
    "\n",
    "# BatchNorm2d instead of SyncBatchNorm because this runs on a single device\n",
    "network = ICNetdc(pretrained_path=train_pretrained_model_path, norm_layer=nn.BatchNorm2d)\n",
    "\n",
    "optim = nn.SGD(params=network.trainable_params(), learning_rate=iter_lr, momentum=optimizer_momentum,\n",
    "                   weight_decay=optimizer_weight_decay)\n",
    "\n",
    "model = Model(network, optimizer=optim, metrics=None)\n",
    "\n",
    "# checkpoint every train_save_checkpoint_epochs epochs, keep at most\n",
    "# train_keep_checkpoint_max checkpoint files\n",
    "config_ck_train = CheckpointConfig(save_checkpoint_steps=iters_per_epoch * train_save_checkpoint_epochs,\n",
    "                                       keep_checkpoint_max=train_keep_checkpoint_max)\n",
    "ckpoint_cb_train = ModelCheckpoint(prefix='ICNet', directory=project_path + 'ckpt' + str(device_id),\n",
    "                                       config=config_ck_train)\n",
    "time_cb_train = TimeMonitor(data_size=dataset.get_dataset_size())\n",
    "loss_cb_train = LossMonitor()\n",
    "print(\"train begins------------------------------\")\n",
    "model.train(epoch=epoch, train_dataset=dataset, callbacks=[ckpoint_cb_train, loss_cb_train, time_cb_train],\n",
    "                dataset_sink_mode=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "045bd2b8-4bb0-42a0-867f-d358aca3ae2e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "pretrained....\n"
     ]
    }
   ],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "19617287-7cb0-40e3-8392-30e09ef90717",
   "metadata": {},
   "source": [
    "下面是模型验证：\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "fa047551-14f2-4a37-a043-12468511ca45",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import time\n",
    "import sys\n",
    "import argparse\n",
    "import yaml\n",
    "import numpy as np\n",
    "from PIL import Image\n",
    "import mindspore.ops as ops\n",
    "from mindspore import load_param_into_net\n",
    "from mindspore import load_checkpoint\n",
    "from mindspore import Tensor\n",
    "import mindspore.dataset.vision as vision\n",
    "from src.models import ICNet\n",
    "from src.metric import SegmentationMetric\n",
    "from src.logger import SetupLogger\n",
    "\n",
    "\n",
    "class Evaluator:\n",
    "    \"\"\"Run validation of a trained ICNet model on the Cityscapes val split.\n",
    "\n",
    "    Collects the val image/mask pairs, builds the network, restores weights\n",
    "    from a checkpoint and reports mIoU / pixel accuracy / average step time.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, checkpoint_path=\"/home/ma-user/work/ICNet/ckpt0/ICNet-115_1.ckpt\"):\n",
    "        \"\"\"Build the network and load checkpoint weights.\n",
    "\n",
    "        Args:\n",
    "            checkpoint_path (str): path of the .ckpt file to evaluate.\n",
    "                Defaults to the previously hard-coded training output, so\n",
    "                existing `Evaluator()` calls behave exactly as before.\n",
    "        \"\"\"\n",
    "        # get valid dataset images and targets\n",
    "        self.image_paths, self.mask_paths = _get_city_pairs(dataset_path, \"val\")\n",
    "\n",
    "        # create network in inference mode\n",
    "        self.model = ICNet(nclass=19, pretrained_path=train_pretrained_model_path, istraining=False)\n",
    "\n",
    "        # load ckpt -- the path is now a parameter instead of a constant buried\n",
    "        # here, so the `checkpoint_path` defined in the driver cell can be used\n",
    "        param_dict = load_checkpoint(checkpoint_path)\n",
    "        load_param_into_net(self.model, param_dict)\n",
    "\n",
    "        # evaluation metrics over the 19 Cityscapes train classes\n",
    "        self.metric = SegmentationMetric(19)\n",
    "\n",
    "        # labelId -> train-id lookup tables used by _class_to_index; built once\n",
    "        # here instead of on every call (-1 means \"ignore\" class)\n",
    "        self._key = np.array([-1, -1, -1, -1, -1, -1,\n",
    "                              -1, -1, 0, 1, -1, -1,\n",
    "                              2, 3, 4, -1, -1, -1,\n",
    "                              5, -1, 6, 7, 8, 9,\n",
    "                              10, 11, 12, 13, 14, 15,\n",
    "                              -1, -1, 16, 17, 18])\n",
    "        self._mapping = np.array(range(-1, len(self._key) - 1)).astype('int32')\n",
    "\n",
    "    def eval(self):\n",
    "        \"\"\"Evaluate every val image and print mIoU, pixAcc and avg step time.\"\"\"\n",
    "        self.metric.reset()\n",
    "        model = self.model\n",
    "        model = model.set_train(False)\n",
    "\n",
    "        logger.info(\"Start validation, Total sample: {:d}\".format(len(self.image_paths)))\n",
    "        list_time = []\n",
    "\n",
    "        for i in range(len(self.image_paths)):\n",
    "            image = Image.open(self.image_paths[i]).convert('RGB')  # image shape: (W,H,3)\n",
    "            mask = Image.open(self.mask_paths[i])  # mask shape: (W,H)\n",
    "\n",
    "            image = self._img_transform(image)  # image shape: (3,H,W) [0,1]\n",
    "            mask = self._mask_transform(mask)  # mask shape: (H,W)\n",
    "\n",
    "            image = Tensor(image)\n",
    "            expand_dims = ops.ExpandDims()\n",
    "            image = expand_dims(image, 0)  # add batch dim: (1,3,H,W)\n",
    "\n",
    "            start_time = time.time()\n",
    "            output = model(image)\n",
    "            end_time = time.time()\n",
    "            list_time.append(end_time - start_time)\n",
    "\n",
    "            output = output.asnumpy()\n",
    "            mask = np.expand_dims(mask.asnumpy(), axis=0)\n",
    "            self.metric.update(output, mask)\n",
    "\n",
    "        mIoU, pixAcc = self.metric.get()\n",
    "\n",
    "        # guard against an empty val split (original raised ZeroDivisionError)\n",
    "        average_time = sum(list_time) / len(list_time) if list_time else 0.0\n",
    "\n",
    "        print(\"avgmiou\", mIoU)\n",
    "        print(\"avg_pixacc\", pixAcc)\n",
    "        print(\"avgtime\", average_time)\n",
    "\n",
    "    def _img_transform(self, image):\n",
    "        \"\"\"Convert a PIL RGB image to a normalized CHW float array in [0,1].\"\"\"\n",
    "        to_tensor = vision.ToTensor()\n",
    "        normalize = vision.Normalize([.485, .456, .406], [.229, .224, .225], is_hwc=False)\n",
    "        image = to_tensor(image)\n",
    "        image = normalize(image)\n",
    "        return image\n",
    "\n",
    "    def _mask_transform(self, mask):\n",
    "        \"\"\"Convert a PIL labelIds mask to an int32 Tensor of train ids.\"\"\"\n",
    "        mask = self._class_to_index(np.array(mask).astype('int32'))\n",
    "        return Tensor(np.array(mask).astype('int32'))\n",
    "\n",
    "    def _class_to_index(self, mask):\n",
    "        \"\"\"Remap raw Cityscapes labelIds to the 19 train ids (-1 = ignore).\"\"\"\n",
    "        values = np.unique(mask)\n",
    "        # every pixel value must be a known labelId\n",
    "        for value in values:\n",
    "            assert value in self._mapping\n",
    "        # Get the index of each pixel value in the mask corresponding to _mapping\n",
    "        index = np.digitize(mask.ravel(), self._mapping, right=True)\n",
    "        # According to the above index, look up _key to obtain the train-id mask\n",
    "        return self._key[index].reshape(mask.shape)\n",
    "\n",
    "\n",
    "def _get_city_pairs(folder, split='train'):\n",
    "    \"\"\"Collect (image_path, mask_path) pairs for a Cityscapes split.\n",
    "\n",
    "    Args:\n",
    "        folder (str): Cityscapes root containing leftImg8bit/ and gtFine/.\n",
    "        split (str): one of 'train', 'val', 'test'.\n",
    "\n",
    "    Returns:\n",
    "        tuple: (img_paths, mask_paths), parallel lists of file paths.\n",
    "\n",
    "    Raises:\n",
    "        ValueError: if `split` is not a recognized split name (previously\n",
    "            the function silently returned None in that case).\n",
    "    \"\"\"\n",
    "\n",
    "    def get_path_pairs(image_folder, mask_folder):\n",
    "        # walk the image tree and keep only pairs whose mask file exists\n",
    "        img_paths = []\n",
    "        mask_paths = []\n",
    "        for root, _, files in os.walk(image_folder):\n",
    "            for filename in files:\n",
    "                if filename.endswith('.png'):\n",
    "                    imgpath = os.path.join(root, filename)\n",
    "                    # city subfolder name, e.g. 'frankfurt'\n",
    "                    foldername = os.path.basename(os.path.dirname(imgpath))\n",
    "                    maskname = filename.replace('leftImg8bit', 'gtFine_labelIds')\n",
    "                    maskpath = os.path.join(mask_folder, foldername, maskname)\n",
    "                    if os.path.isfile(imgpath) and os.path.isfile(maskpath):\n",
    "                        img_paths.append(imgpath)\n",
    "                        mask_paths.append(maskpath)\n",
    "                    else:\n",
    "                        print('cannot find the mask or image:', imgpath, maskpath)\n",
    "        print('Found {} images in the folder {}'.format(len(img_paths), image_folder))\n",
    "        return img_paths, mask_paths\n",
    "\n",
    "    if split not in ('train', 'val', 'test'):\n",
    "        raise ValueError(\"split must be 'train', 'val' or 'test', got: {}\".format(split))\n",
    "\n",
    "    # \"./Cityscapes/leftImg8bit/<split>\" and \"./Cityscapes/gtFine/<split>\"\n",
    "    img_folder = os.path.join(folder, 'leftImg8bit/' + split)\n",
    "    mask_folder = os.path.join(folder, 'gtFine/' + split)\n",
    "\n",
    "    return get_path_pairs(img_folder, mask_folder)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "48e465f5-935c-4e12-922c-0fe17a592745",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Found 1 images in the folder /home/ma-user/work/ICNet/data/cityscapes/leftImg8bit/val\n",
      "2022-10-17 10:03:20,554 semantic_segmentation INFO: Start validation, Total sample: 1\n",
      "2022-10-17 10:03:20,554 semantic_segmentation INFO: Start validation, Total sample: 1\n",
      "2022-10-17 10:03:20,554 semantic_segmentation INFO: Start validation, Total sample: 1\n",
      "2022-10-17 10:03:20,554 semantic_segmentation INFO: Start validation, Total sample: 1\n",
      "2022-10-17 10:03:20,554 semantic_segmentation INFO: Start validation, Total sample: 1\n",
      "2022-10-17 10:03:20,554 semantic_segmentation INFO: Start validation, Total sample: 1\n",
      "avgmiou 0.005846047910888579\n",
      "avg_pixacc 0.0700367013731069\n",
      "avgtime 15.604077816009521\n"
     ]
    }
   ],
   "source": [
    "# Evaluation driver: configure logging, then run the evaluator.\n",
    "train_ckpt_dir = \"./ckpt/\"\n",
    "model_name = \"icnet\"\n",
    "model_backbone = \"resnet50v1\"\n",
    "# NOTE(review): this variable is not consumed below -- Evaluator loads its\n",
    "# own checkpoint internally; confirm which checkpoint is actually evaluated.\n",
    "checkpoint_path = \"/home/ma-user/work/ICNet/ckpt0/ICNet-115_1.ckpt\"\n",
    "\n",
    "log_filename = '{}_{}_evaluate_log.txt'.format(model_name, model_backbone)\n",
    "logger = SetupLogger(name=\"semantic_segmentation\",\n",
    "                     save_dir=train_ckpt_dir,\n",
    "                     distributed_rank=0,\n",
    "                     filename=log_filename)\n",
    "\n",
    "evaluator = Evaluator()\n",
    "evaluator.eval()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "MindSpore",
   "language": "python",
   "name": "mindspore"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
