{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 54,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'\\nPipeCoCo类\\n__init__: 用于封装网络模型\\n    default: \\n        client: 前两层\\n        server：剩余网络至全连接层\\n        划分块数: 2×2\\n\\n    partitioner:\\n        F和B的值要通过遍历后计算得出\\n        \\nsync: 用于同步客户端和服务端的模型\\n    要同步的包括area_map和模型阶段\\n    \\nlayer_partitioner:\\n    正向：保证用于下层计算的区域是可用的\\n    反向：保证要计算的区域不重复\\n'"
      ]
     },
     "execution_count": 54,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''\n",
    "PipeCoCo类\n",
    "__init__: 用于封装网络模型\n",
    "    default: \n",
    "        client: 前两层\n",
    "        server：剩余网络至全连接层\n",
    "        划分块数: 2×2\n",
    "\n",
    "    partitioner:\n",
    "        F和B的值要通过遍历后计算得出\n",
    "        \n",
    "sync: 用于同步客户端和服务端的模型\n",
    "    要同步的包括area_map和模型阶段\n",
    "    \n",
    "layer_partitioner:\n",
    "    正向：保证用于下层计算的区域是可用的\n",
    "    反向：保证要计算的区域不重复\n",
    "'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {},
   "outputs": [],
   "source": [
    "import mindspore.dataset.vision.c_transforms as CV\n",
    "import mindspore.dataset.transforms.c_transforms as C\n",
    "from mindspore.dataset.vision import Inter\n",
    "from mindspore import dtype as mstype\n",
    "import mindspore.nn as nn\n",
    "from mindspore.common.initializer import Normal"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "metadata": {},
   "outputs": [],
   "source": [
    "class LeNet5(nn.Cell):\n",
    "    \"\"\"Classic LeNet-5 CNN producing `num_class` logits.\n",
    "\n",
    "    Layout: conv1 -> relu -> pool1 -> conv2 -> relu -> pool2 -> flatten\n",
    "            -> fc1 -> relu -> fc2 -> relu -> fc3 (raw logits).\n",
    "    \"\"\"\n",
    "    # define the operators required\n",
    "    def __init__(self, num_class=10, num_channel=1):\n",
    "        super(LeNet5, self).__init__()\n",
    "        # conv1: 5x5 kernel, num_channel -> 6 channels, no padding\n",
    "        self.conv1 = nn.Conv2d(num_channel, 6, 5, pad_mode='valid')\n",
    "        self.max_pool2d1 = nn.MaxPool2d(kernel_size=2, stride=2)\n",
    "        # conv2: 5x5 kernel, 6 -> 16 channels, no padding\n",
    "        self.conv2 = nn.Conv2d(6, 16, 5, pad_mode='valid')\n",
    "        self.max_pool2d2 = nn.MaxPool2d(kernel_size=2, stride=2)\n",
    "        self.flatten = nn.Flatten()\n",
    "        # 400 = 16 * 5 * 5: flattened size after both conv/pool stages on a 32x32 input\n",
    "        self.fc1 = nn.Dense(400, 120, weight_init=Normal(0.02))\n",
    "        self.fc2 = nn.Dense(120, 84, weight_init=Normal(0.02))\n",
    "        self.fc3 = nn.Dense(84, num_class, weight_init=Normal(0.02))\n",
    "        self.relu = nn.ReLU()\n",
    "\n",
    "\n",
    "    # use the preceding operators to construct the forward pass\n",
    "    def construct(self, x):\n",
    "        \"\"\"Forward pass: return logits of shape (batch, num_class).\"\"\"\n",
    "        x = self.max_pool2d1(self.relu(self.conv1(x)))\n",
    "        x = self.max_pool2d2(self.relu(self.conv2(x)))\n",
    "        x = self.flatten(x)\n",
    "        x = self.relu(self.fc1(x))\n",
    "        x = self.relu(self.fc2(x))\n",
    "        x = self.fc3(x)\n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "metadata": {},
   "outputs": [],
   "source": [
    "class PipeCoCo:\n",
    "    \n",
    "    def __init__(self):\n",
    "        self.block_size = 2\n",
    "        pass\n",
    "    \n",
    "    def area_partitioner(self, net, size, ):\n",
    "        pass"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 58,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "((4, 14), (4, 14))"
      ]
     },
     "execution_count": 58,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''\n",
    "计算当前层区域映射到的上一层区域\n",
    "'''\n",
    "def pre_area(x1, y1, x2, y2, size, stribe):\n",
    "    \n",
    "    if not isinstance(size,tuple):\n",
    "        size = (size,size)\n",
    "\n",
    "    if not isinstance(stribe,tuple):\n",
    "        stribe = (stribe,stribe)\n",
    "        \n",
    "    border_x = (x1*stribe[0], (x2-1)*stribe[0]+size[0])\n",
    "    border_y = (y1*stribe[1], (y2-1)*stribe[1]+size[1])\n",
    "    \n",
    "    return border_x, border_y\n",
    "\n",
    "pre_area(4,4,10,10,5,1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 59,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{0: [((0, 1), (0, 1))],\n",
       " 1: [((0, 1), (1, 4))],\n",
       " 2: [((1, 4), (0, 1))],\n",
       " 3: [((1, 4), (1, 4))]}"
      ]
     },
     "execution_count": 59,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''\n",
    "最初划分层的划分方式\n",
    "'''\n",
    "def init_area_partition(size,block_size):\n",
    "    \n",
    "    area_map = {}\n",
    "    height = size[0]\n",
    "    width = size[1]\n",
    "    \n",
    "    rest_h = height%block_size\n",
    "    rest_w = width%block_size\n",
    "    \n",
    "    avg_h = height//block_size\n",
    "    avg_w = width//block_size\n",
    "    \n",
    "    h_units = [avg_h for _ in range(block_size-rest_h)]+[avg_h+1 for _ in range(rest_h) ]\n",
    "    w_units = [avg_w for _ in range(block_size-rest_w)]+[avg_w+1 for _ in range(rest_w)]\n",
    "    \n",
    "    if rest_h==0:\n",
    "        h_units[0] = h_units[0]-1\n",
    "        h_units[-1] = h_units[-1]+1\n",
    "    \n",
    "    if rest_w==0:\n",
    "        w_units[0] = w_units[0]-1\n",
    "        w_units[-1] = w_units[-1]+1 \n",
    "    \n",
    "    h = 0\n",
    "    for i,h_unit in enumerate(h_units):\n",
    "        border_x = (h,h+h_unit)\n",
    "        w = 0\n",
    "        for j,w_unit in enumerate(w_units):\n",
    "            border_y = (w,w+w_unit)\n",
    "            w+=w_unit\n",
    "            area_map[i*block_size+j] = [(border_x,border_y)]\n",
    "        h+=h_unit\n",
    "    return area_map\n",
    "  \n",
    "init_area_partition([4,4],2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "metadata": {},
   "outputs": [],
   "source": [
    "'''\n",
    "区域划分，将融合的层结构划分为block_size × block_size 数量\n",
    "'''\n",
    "def area_partitioner(size, block_size, layers):\n",
    "    \"\"\"Trace each of the block_size x block_size output blocks back through\n",
    "    `layers` (given in output-to-input order; each layer must expose\n",
    "    .stride and .kernel_size, e.g. Conv2d / MaxPool2d).\n",
    "\n",
    "    Returns:\n",
    "        (area_map, pre_area_map): dicts keyed by i * block_size + j; each value\n",
    "        is a list of ((x0, x1), (y0, y1)) regions, reversed at the end so the\n",
    "        lists read input-to-output.\n",
    "    \"\"\"\n",
    "    area_map = {}\n",
    "    pre_area_map = {}\n",
    "    # Balance the computation regions: surplus units go to later-ordered blocks.\n",
    "    # NOTE(review): rest_h/rest_w/unit_h/unit_w are computed but never used —\n",
    "    # init_area_partition performs the balancing itself.\n",
    "    rest_h = size[0]%block_size\n",
    "    rest_w = size[1]%block_size\n",
    "    unit_h = size[0]//block_size\n",
    "    unit_w = size[1]//block_size\n",
    "    area_map = init_area_partition(size, block_size)\n",
    "    for i in range(block_size):\n",
    "        for j in range(block_size):\n",
    "            # Start from this block's partition of the final (fused) layer.\n",
    "            border_x, border_y = area_map[i*block_size+j][-1]\n",
    "            pre_area_map[i*block_size+j] = []\n",
    "            for x,layer in enumerate(layers):\n",
    "                stride = layer.stride\n",
    "                kernel_size = layer.kernel_size\n",
    "                # Find the previous layer's source region, then drop the part\n",
    "                # already covered by the blocks above / to the left.\n",
    "                border_x,border_y = pre_area(border_x[0],border_y[0],border_x[1],border_y[1],\n",
    "                                             kernel_size,stride)\n",
    "                pre_area_map[i*block_size+j].append((border_x,border_y))\n",
    "                if x+1<len(layers):\n",
    "                    if i > 0:\n",
    "                        border_x = (max(area_map[(i-1)*block_size+j][x+1][0][1],border_x[0]),border_x[1])\n",
    "                    if j > 0:\n",
    "                        border_y = (max(area_map[i*block_size+j-1][x+1][1][1],border_y[0]),border_y[1])\n",
    "                    area_map[i*block_size+j].append((border_x,border_y))\n",
    "\n",
    "    # Reverse so each block's region list runs input -> output.\n",
    "    for i in range(block_size):\n",
    "        for j in range(block_size):\n",
    "            area_map[i*block_size+j] =  area_map[i*block_size+j][::-1]\n",
    "            pre_area_map[i*block_size+j] =  pre_area_map[i*block_size+j][::-1]\n",
    "    return area_map,pre_area_map"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "metadata": {},
   "outputs": [],
   "source": [
    "'''\n",
    "用深度搜索的方式找出全部网络结构\n",
    "'''\n",
    "def dfs_search_layers(net, layers_list):\n",
    "    if len(net.cells())==0:\n",
    "        layers_list.append(net)\n",
    "        print(net.cls_name)\n",
    "        return\n",
    "    for layer in net.cells():\n",
    "        dfs_search_layers(layer,layers_list)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Conv2d\n",
      "MaxPool2d\n",
      "Conv2d\n",
      "MaxPool2d\n",
      "Flatten\n",
      "Dense\n",
      "Dense\n",
      "Dense\n",
      "ReLU\n"
     ]
    }
   ],
   "source": [
    "# Instantiate an (untrained) LeNet5 and flatten it into its leaf operators.\n",
    "net = LeNet5()\n",
    "layers_list = []\n",
    "dfs_search_layers(net, layers_list)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 63,
   "metadata": {},
   "outputs": [],
   "source": [
    "'''\n",
    "融合网络层：\n",
    "以LeNet5举例：conv2d—>maxpool—>conv2d—>maxpool\n",
    "'''\n",
    "# Fuse the first four layers (conv/pool stack), reversed into the\n",
    "# output-to-input order that area_partitioner expects.\n",
    "fuse_layers = layers_list[:4][::-1]\n",
    "# [5, 5] is the spatial size after conv->pool->conv->pool on a 32x32 input.\n",
    "# Bug fix: area_partitioner returns (area_map, pre_area_map); the original\n",
    "# bound the whole tuple to `area_map`, which breaks area_map.values() in\n",
    "# cal_workload. Unpack both results.\n",
    "area_map, pre_area_map = area_partitioner([5,5],2,fuse_layers)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "metadata": {},
   "outputs": [],
   "source": [
    "'''\n",
    "计算量统计\n",
    "MAC计算量\n",
    "C_in * K * K * H_out * W_out + C_in * C_out * H_out * W_out\n",
    "'''\n",
    "def cal_workload():\n",
    "    \"\"\"Print the per-block MAC workload of the fused conv layers.\n",
    "\n",
    "    NOTE(review): relies on notebook globals `area_map` and `fuse_layers`.\n",
    "    As assigned earlier, area_partitioner returns a tuple, so\n",
    "    `area_map.values()` fails unless the result was unpacked — verify.\n",
    "    \"\"\"\n",
    "    for j, block in enumerate(area_map.values()):\n",
    "        # Running MAC total for this block.\n",
    "        s = 0\n",
    "        print('block',j)\n",
    "        for i,layer in enumerate(fuse_layers):\n",
    "            # Pooling layers contribute no multiply-accumulates.\n",
    "            if isinstance(layer,nn.MaxPool2d):\n",
    "                continue\n",
    "            # Region of this block at layer i (lists are index-aligned with fuse_layers).\n",
    "            area = block[i]\n",
    "    #         print(layer)\n",
    "            print(layer.cls_name,area)\n",
    "            # Output height x width of this block at this layer.\n",
    "            HW = (area[0][1]-area[0][0])*(area[1][1]-area[1][0])\n",
    "            C_in = layer.in_channels\n",
    "            C_out = layer.out_channels\n",
    "            # kernel_size may be scalar or (h, w); assumes square kernels when a tuple.\n",
    "            if isinstance(layer.kernel_size,tuple):\n",
    "                K = layer.kernel_size[0]\n",
    "            else:\n",
    "                K = layer.kernel_size\n",
    "            # MACs per the formula in the cell header.\n",
    "            print(C_in*K*K*HW + C_in*C_out*HW)\n",
    "            s += C_in*K*K*HW + C_in*C_out*HW\n",
    "\n",
    "        print(s)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 65,
   "metadata": {},
   "outputs": [],
   "source": [
    "def print_outputs_shape():\n",
    "    \"\"\"Run a dummy 1x1x32x32 input through every flattened layer, printing\n",
    "    each layer's class name and output shape (uses notebook globals\n",
    "    layers_list, np, Tensor, mstype).\n",
    "    \"\"\"\n",
    "    # Bug fix: the alias `ms` is never defined in this notebook; use the\n",
    "    # imported `mstype` alias (`from mindspore import dtype as mstype`).\n",
    "    output = Tensor(np.ones([1, 1, 32, 32]), mstype.float32)\n",
    "    for layer in layers_list:\n",
    "        output = layer(output)\n",
    "        print(layer.cls_name)\n",
    "        print(output.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 66,
   "metadata": {},
   "outputs": [],
   "source": [
    "from mindspore import load_checkpoint, load_param_into_net"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 67,
   "metadata": {},
   "outputs": [],
   "source": [
    "from mindspore import load_checkpoint, load_param_into_net\n",
    "from mindspore import Tensor, Model\n",
    "from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor\n",
    "from mindspore.nn import Accuracy\n",
    "import os\n",
    "import mindspore.dataset as ds"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 68,
   "metadata": {},
   "outputs": [],
   "source": [
    "def create_dataset(data_path, batch_size=32, repeat_size=1,\n",
    "                   num_parallel_workers=8):\n",
    "    \"\"\"\n",
    "    Build the MNIST dataset pipeline for training or testing.\n",
    "\n",
    "    Args:\n",
    "        data_path (str): Data path\n",
    "        batch_size (int): The number of data records in each group\n",
    "        repeat_size (int): The number of replicated data records\n",
    "        num_parallel_workers (int): The number of parallel workers\n",
    "    \"\"\"\n",
    "    # Raw MNIST records.\n",
    "    mnist_ds = ds.MnistDataset(data_path)\n",
    "\n",
    "    # Image pipeline: resize to 32x32, scale to [0, 1], standardise with the\n",
    "    # MNIST mean (0.1307) and std (0.3081), then move channels first.\n",
    "    image_ops = [\n",
    "        CV.Resize((32, 32), interpolation=Inter.LINEAR),\n",
    "        CV.Rescale(1.0 / 255.0, 0.0),\n",
    "        CV.Rescale(1 / 0.3081, -1 * 0.1307 / 0.3081),\n",
    "        CV.HWC2CHW(),\n",
    "    ]\n",
    "    label_cast = C.TypeCast(mstype.int32)\n",
    "\n",
    "    # Labels become int32; images pass through each transform in order.\n",
    "    mnist_ds = mnist_ds.map(operations=label_cast, input_columns=\"label\",\n",
    "                            num_parallel_workers=num_parallel_workers)\n",
    "    for op in image_ops:\n",
    "        mnist_ds = mnist_ds.map(operations=op, input_columns=\"image\",\n",
    "                                num_parallel_workers=num_parallel_workers)\n",
    "\n",
    "    # Shuffle, batch (dropping ragged tails) and repeat.\n",
    "    mnist_ds = mnist_ds.shuffle(buffer_size=10000)\n",
    "    mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True)\n",
    "    mnist_ds = mnist_ds.repeat(repeat_size)\n",
    "\n",
    "    return mnist_ds"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 69,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "from mindspore import Tensor, Model\n",
    "from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor\n",
    "from mindspore.nn import Accuracy\n",
    "import mindspore.nn as nn\n",
    "from mindspore.nn import SoftmaxCrossEntropyWithLogits\n",
    "from mindspore import context"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 70,
   "metadata": {},
   "outputs": [],
   "source": [
    "# context.set_context(mode=context.GRAPH_MODE, device_target='CPU')\n",
    "# Hyper-parameters for the Momentum optimizer.\n",
    "lr = 0.01\n",
    "momentum = 0.9\n",
    "\n",
    "# create the network\n",
    "network = LeNet5()\n",
    "# define the optimizer\n",
    "# net_opt = nn.Adam(network.trainable_params())\n",
    "net_opt = nn.Momentum(network.trainable_params(), lr, momentum)\n",
    "\n",
    "# define the loss function\n",
    "# sparse=True: labels are class indices rather than one-hot vectors.\n",
    "net_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 71,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "============== Starting Testing ==============\n",
      "============== Accuracy:{'Accuracy': 0.9840745192307693} ==============\n"
     ]
    }
   ],
   "source": [
    "print(\"============== Starting Testing ==============\")\n",
    "# load the saved model for evaluation\n",
    "mnist_path = \"../datasets/MNIST_Data\"\n",
    "model_path = \"./models/lenet/\"\n",
    "# Fresh LeNet5 instance; weights come from the checkpoint below.\n",
    "network = LeNet5()\n",
    "param_dict = load_checkpoint(model_path+\"/checkpoint_lenet-5_1875.ckpt\")\n",
    "# load the checkpoint parameters into the network\n",
    "load_param_into_net(network, param_dict)\n",
    "model = Model(network, net_loss, net_opt, metrics={\"Accuracy\": Accuracy()} )\n",
    "ds_eval = create_dataset(os.path.join(mnist_path, \"test\"))\n",
    "# dataset_sink_mode=False: feed batches through Python instead of device sink.\n",
    "acc = model.eval(ds_eval, dataset_sink_mode=False)\n",
    "print(\"============== Accuracy:{} ==============\".format(acc))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 72,
   "metadata": {},
   "outputs": [],
   "source": [
    "def PipeCoCo_forward(x, layers):\n",
    "    \"\"\"TODO: partitioned PipeCoCo forward pass of `x` through `layers` (unimplemented).\"\"\"\n",
    "    pass\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 73,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'\\n对比:按照原流程和PipeCoCo方法计算结果比较\\n\\n\\n原流程:\\n对前四层fused_layers执行计算，然后全连接操作等实现分类\\n\\nPipeCoCo:\\n对前四层fused_layer执行PipeCoCo计算\\n\\n'"
      ]
     },
     "execution_count": 73,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''\n",
    "对比:按照原流程和PipeCoCo方法计算结果比较\n",
    "\n",
    "\n",
    "原流程:\n",
    "对前四层fused_layers执行计算，然后全连接操作等实现分类\n",
    "\n",
    "PipeCoCo:\n",
    "对前四层fused_layer执行PipeCoCo计算\n",
    "\n",
    "'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 74,
   "metadata": {},
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "import matplotlib\n",
    "import numpy as np\n",
    "import mindspore.dataset as ds\n",
    "import mindspore"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 75,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "All the figures in this group are predicted correctly!\n",
      "[6 1] <--Predicted figures\n",
      "[6 1] <--The right number\n"
     ]
    },
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAFwAAABCCAYAAADaM7GzAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8rg+JYAAAACXBIWXMAAAsTAAALEwEAmpwYAAAIBElEQVR4nO2be2wU1xWHvzO73l2v3zY2GPArvPIQDSGmLUQBmshKS1OSVCWJoKg0QYBKWqGitoloUOm7aiPlH0LVQhpaqWlC07QNCVDahkdQaCikRgIHE5u1HWyDX9he47U9O7d/eEm84Neu744XeT9ptTP3zpxz9Nszd+65MytKKRLYhzHeAUw0EoLbTEJwm0kIbjMJwW0mIbjNJAS3mbgQXASHCD8WoV6EThHeFyFzvOMaDBF+I8I5ESwR1kR6vnbBRXBGcdo2YBGwEEgHVgMBnXENRpSxlgPfAE5F5VQpNaoPKB+oZ0CdBdUG6negPKCWgvoI1PdANYL6AygD1NOgqkC1gHoVVPYQdrNA+UHNGG0s4xXrdT7eAbUm0tgizfBVwAPADGA28P1Q+xQgGygC1gHfBB4GlgBTgTZg+zUjIpwWYWVody5gAl8RoVGEShE2RhiXXbGOnQizZsOA/WWhrFgKqheUZ0BfBaj7B+zng+oD5RzE7kpQCtQuUMmgPgWqCVTZGDNce6w6MjzSMaxuwHYN/RkB0KRU2JhbBLwugjWgLQhMBi5eZ7M79P1DpegGTovwJ2AZcDDC+GId65iJVPCCAduFQH1o+/olxzrgCaU4NgqbpwexoWMJMxaxjplIx/CNIkwXIRvYArwyxHG/Bn4iQhGACLkiPDTYgUpRBRwFtojgFuE24HFgb4SxxTzWUL9LBA8gQJIIHpEIdIzyzn8F1G5Q3mt3/uuONUB9G9Q5UJ2h8fOnA/rPgFo1YH8aqP2h2Uo1qPUaZym6Yz0UuucM/CwdbWzSb2RkRPABa5Xin6P+NceJeI41LirNiURCcJsZ9ZCSQA+JDLeZhOA2M2zhU2asiKvx5qC1R4bqu1liTWS4zUSzHjwijtxc/PeUUPdQMKy94K8OUo9VEWxuiYXbm4KYCC6pXtpmOdm5eFdY+6Yz60krT4GE4HpRTgdmCtyfHJ7hphcwJvYopl1ww+MhmJVCb7o18sHxhgjOkiKUISACHX6sK+2onh5tLrQKLk4nUjSdprtTmXZHY1hfu9WNBOlf64lTjORkfI9NxXIBAtlnLTJPNGBeqNHmQ6vgjoJp1JflUbCimr/P2h/Wt711HslNCnr7dLrUhwhGehqvrn+OO1zJANx2bDWu9lxc8Sp43SPTKFt1nOfyb3ygfWDLEvL+fRqzq0uny5jiSjIxU7x4PB6sgJ6XCCb2HWwEjpS+SMHmSi59/S5tNrUJXrt1EUtXnuCpSUfC2i/0+fnsdzaQcvwCVnfMXzWJCsPjwVo8jxl7WylxOj5uzzCSmeT2E3QNWeBG7kuHkdqtiyhbfoKncg9R6PQC4LcC/L5jEl/a8V2y/1FFsKUVrOAIlsYJh4PetCS2TT6M13B93Ly5YT4HDpQy5d1Oba60CJ57TwNrJx2lxOnBIf0m+5TFqa5iil65SLC5OX7FDqEckOXwhrUdaZhJzmmFUeHT5keL4HMyLzPVESRJ+i/Hdqubw4E83qiY2z+liuOp4HD4u924r5hYnfoyfGyzlFChkJlUg4NPxrl6U7Hz4r3kveke+lzDgSM9FXJzwtubWgh2+Mf9iqjs6yLQ6cbRo7eAG5Pg1wqF1SmHccsnpurMDM5UTWP2y8cHP1EER24OgTsLaVjkCuvKfycLT3lt/zBk05UhLhdBd/jF/rOGz5Na4cJ1uRmdP330gt9QKIQLN9yrPI6cbLoWFFO3wqS67IWwvpKiJylyFeN9z7JlVdHweFAFk2m9NVzw/7w5l5LX6jGrfXr9abUG9Kg+Gs0MJOAYtN/weOhYMpPOde1Ul714
Q/+FB3bRts5P5+KZGB6P7vBuQKbn43skm4oNL4x8sAa0C7718gJ+sftR5jxzdtD+xifnU7z5HG/Pf2lIG7EoOOIF7auFPZYTZzc33NnP755P6YwaHs35C/d5PyTDSB3SRiwKjnghJuvhAOJ2I7fewgeb+ue2v1q4h8946sk0nCSLd4SzwRAFdugtMqgfFSPf2gVfkHqBfQtv56Lrbq5Otdi5+LcAlLr9w2b1QF7qyONv5fMoOj8+K4vLzi0jq9JCdepfaItecKVQfX08W7ucXxa9TrHTi0MM7k2uYdu8N3h/dhGF7pYBT32SR2X2NX86PzrxRSb/y0lKeR1m1AFGT+3+YgrLm7A6OrTbHlOGq66r+F6+i0PfmsljaT5SxUOhM5XCtDYeT2uL2N7Jnl6e/u+XyXvLTfbROsyPtL8PPypyzpqo2nqtT3quMaZZihUIkLvjXQ623k6daeG3AgRVdJVZc7CLDWe+ypQ/u8k6eH7cxI41WqaF750v4dm65fyx8xZ85tWIzg2q/h/qB433kflzL6lvldv7GoVSev5vMUq03DRnrTlJJ7Bj48McWXWW7YX7yDCGH7ODyqLDClBjOvigN5/qBzMwLv2Pm/DRc0RonaVM2XUKX+OdfO5rkzlVOtQ/PPo5GnCy9vhG5myuRylF8HKTzlDiFq2CW4EA6Yc/JLUmnyX564Y91ui1mNXYhXnp8k27fBsN2ufhwZZWpNOPt2IE00qh+szxF3uIwidW6K80lUL19MRkShUTOvxkVlo8WPkF9s7eF3N3MSvtbxasK+1knWzm4p4SNj1RCoDrSh8EY/MAZMILrnp6sKp8TO3p5e2kTwMwvbaOoBmbGnfCCw6gTBPTV8uU52sBYrqckHgRyGYSgttM4m+DNpPIcJtJCG4zCcFtJiG4zSQEt5mE4Dbzfzy/Dv+lCnIXAAAAAElFTkSuQmCC\n",
      "text/plain": [
       "<Figure size 432x288 with 2 Axes>"
      ]
     },
     "metadata": {
      "needs_background": "light"
     },
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Predict on a small evaluation batch and visualise the results.\n",
    "ds_eval = create_dataset(os.path.join(mnist_path, \"test\"),batch_size=2)\n",
    "ds_test = ds_eval.create_dict_iterator()\n",
    "data = next(ds_test)\n",
    "images = data[\"image\"].asnumpy()\n",
    "labels = data[\"label\"].asnumpy()\n",
    "\n",
    "output = model.predict(Tensor(data['image']))\n",
    "# Predicted class = arg-max over the logits.\n",
    "pred = np.argmax(output.asnumpy(), axis=1)\n",
    "# NOTE(review): err_num is never used — candidate for removal.\n",
    "err_num = []\n",
    "# index flags \"all correct\" (1) vs \"at least one wrong\" (0).\n",
    "index = 1\n",
    "for i in range(len(labels)):\n",
    "    # NOTE(review): grid is hard-coded 4x8 (32 slots) while batch_size=2 here —\n",
    "    # works, but should track the batch size.\n",
    "    plt.subplot(4, 8, i+1)\n",
    "    color = 'blue' if pred[i] == labels[i] else 'red'\n",
    "    plt.title(\"pre:{}\".format(pred[i]), color=color)\n",
    "    plt.imshow(np.squeeze(images[i]))\n",
    "    plt.axis(\"off\")\n",
    "    if color == 'red':\n",
    "        index = 0\n",
    "        print(\"Row {}, column {} is incorrectly identified as {}, the correct value should be {}\".format(int(i/8)+1, i%8+1, pred[i], labels[i]), '\\n')\n",
    "if index:\n",
    "    print(\"All the figures in this group are predicted correctly!\")\n",
    "print(pred, \"<--Predicted figures\")\n",
    "print(labels, \"<--The right number\")\n",
    "plt.show()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 76,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Conv2d\n",
      "MaxPool2d\n",
      "Conv2d\n",
      "MaxPool2d\n",
      "Flatten\n",
      "Dense\n",
      "Dense\n",
      "Dense\n",
      "ReLU\n"
     ]
    }
   ],
   "source": [
    "# Re-flatten the checkpoint-loaded `network` (not the untrained `net`).\n",
    "layers_list = []\n",
    "dfs_search_layers(network, layers_list)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 77,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Manually replay LeNet5's forward pass from the flattened layer list:\n",
    "# layers_list = [conv1, pool1, conv2, pool2, flatten, fc1, fc2, fc3, relu].\n",
    "x = Tensor(images)\n",
    "relu = layers_list[-1]\n",
    "\n",
    "# Fused stage: conv -> relu -> pool, twice. Bug fix: the original loop applied\n",
    "# ReLU only once (before conv2) and never after conv2 or the dense layers, so\n",
    "# it did not replicate LeNet5.construct.\n",
    "fused_layer = layers_list[:4]\n",
    "for layer in fused_layer:\n",
    "    x = layer(x)\n",
    "    if isinstance(layer, nn.Conv2d):\n",
    "        x = relu(x)\n",
    "\n",
    "# Classifier head: flatten -> fc1 -> relu -> fc2 -> relu -> fc3 (logits).\n",
    "x = layers_list[4](x)\n",
    "x = relu(layers_list[5](x))\n",
    "x = relu(layers_list[6](x))\n",
    "x = layers_list[7](x)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 78,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([6, 1], dtype=int64)"
      ]
     },
     "execution_count": 78,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "np.argmax(x.asnumpy(), axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 80,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[array([[[[-0.42421293, -0.42421293, -0.42421293, ..., -0.42421293,\n",
       "           -0.42421293, -0.42421293],\n",
       "          [-0.42421293, -0.42421293, -0.42421293, ..., -0.42421293,\n",
       "           -0.42421293, -0.42421293],\n",
       "          [-0.42421293, -0.42421293, -0.42421293, ..., -0.42421293,\n",
       "           -0.42421293, -0.42421293],\n",
       "          ...,\n",
       "          [-0.42421293, -0.42421293, -0.42421293, ..., -0.42421293,\n",
       "           -0.42421293, -0.42421293],\n",
       "          [-0.42421293, -0.42421293, -0.42421293, ..., -0.42421293,\n",
       "           -0.42421293, -0.42421293],\n",
       "          [-0.42421293, -0.42421293, -0.42421293, ..., -0.42421293,\n",
       "           -0.42421293, -0.42421293]]],\n",
       " \n",
       " \n",
       "        [[[-0.42421293, -0.42421293, -0.42421293, ..., -0.42421293,\n",
       "           -0.42421293, -0.42421293],\n",
       "          [-0.42421293, -0.42421293, -0.42421293, ..., -0.42421293,\n",
       "           -0.42421293, -0.42421293],\n",
       "          [-0.42421293, -0.42421293, -0.42421293, ..., -0.42421293,\n",
       "           -0.42421293, -0.42421293],\n",
       "          ...,\n",
       "          [-0.42421293, -0.42421293, -0.42421293, ..., -0.42421293,\n",
       "           -0.42421293, -0.42421293],\n",
       "          [-0.42421293, -0.42421293, -0.42421293, ..., -0.42421293,\n",
       "           -0.42421293, -0.42421293],\n",
       "          [-0.42421293, -0.42421293, -0.42421293, ..., -0.42421293,\n",
       "           -0.42421293, -0.42421293]]]], dtype=float32),\n",
       " array([[[[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]]],\n",
       " \n",
       " \n",
       "        [[[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]]]]),\n",
       " array([[[[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]]],\n",
       " \n",
       " \n",
       "        [[[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]]]]),\n",
       " array([[[[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]],\n",
       " \n",
       "         ...,\n",
       " \n",
       "         [[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]]],\n",
       " \n",
       " \n",
       "        [[[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]],\n",
       " \n",
       "         ...,\n",
       " \n",
       "         [[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          ...,\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.],\n",
       "          [0., 0., 0., ..., 0., 0., 0.]]]]),\n",
       " array([[[[0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.]]],\n",
       " \n",
       " \n",
       "        [[[0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.]],\n",
       " \n",
       "         [[0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.],\n",
       "          [0., 0., 0., 0., 0.]]]])]"
      ]
     },
     "execution_count": 80,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "x = Tensor(images)\n",
    "fused_layers = layers_list[:4]\n",
    "output = [x.asnumpy()] \n",
    "for i,layer in enumerate(fused_layer):\n",
    "    if i%2==0 and i>0:\n",
    "        x = layers_list[-1](x)\n",
    "    x = layer(x)\n",
    "    if hasattr(layer,'out_channels'):\n",
    "        output.append(np.zeros([x.shape[0], layer.out_channels,x.shape[-2], x.shape[-1]]))\n",
    "    else:\n",
    "        output.append(np.zeros([x.shape[0], fused_layer[i-1].out_channels,x.shape[-2], x.shape[-1]]))\n",
    "output\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 81,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Partition the final fused-feature map into blocks (the `2` presumably\n",
     "# selects the 2x2 split described in the header notes -- confirm against\n",
     "# area_partitioner). Walking fused_layer in reverse derives, per block,\n",
     "# the output region each layer writes (area_map) and the input region it\n",
     "# must read to produce it (pre_area_map).\n",
     "area_map,pre_area_map = area_partitioner(output[-1].shape[-2:],2,fused_layer[::-1]) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 82,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Recompute the fused layers block-by-block: for each spatial block i and\n",
     "# each layer j, slice the region of layer j's input needed for that block,\n",
     "# run the layer, and write the result into the matching region of the\n",
     "# pre-allocated output[j+1] buffer. Region entries appear to be\n",
     "# ((row_start, row_end), (col_start, col_end)) tuples -- TODO confirm\n",
     "# against area_partitioner.\n",
     "for i,block in enumerate(area_map.values()):\n",
     "    for j,layer in enumerate(fused_layer):\n",
     "        # pre_area_map[i][j] = input region layer j reads for block i\n",
     "        pre_layer_area = pre_area_map[i]\n",
     "        x = Tensor(output[j][:,:,pre_layer_area[j][0][0]:pre_layer_area[j][0][1],\n",
     "                             pre_layer_area[j][1][0]:pre_layer_area[j][1][1]], mindspore.float32)\n",
     "        # same interleaving as the full forward pass: apply the shared\n",
     "        # activation (layers_list[-1]) before every even-indexed layer > 0\n",
     "        if j%2==0 and j>0:\n",
     "            x = layers_list[-1](x)\n",
     "        x = layer(x)\n",
     "        # block[j] = region of layer j's output this block covers\n",
     "        output[j+1][:,:,block[j][0][0]:block[j][0][1],\n",
     "                    block[j][1][0]:block[j][1][1]] = x.asnumpy()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 83,
   "metadata": {},
   "outputs": [],
   "source": [
    "x = Tensor(output[-1], mindspore.float32)\n",
    "for i,layer in enumerate(layers_list[-5:-1]):\n",
    "    x = layer(x)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 84,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(2, 10)\n"
     ]
    }
   ],
   "source": [
     "# Sanity check: expect (batch, num_classes) logits from the dense head\n",
     "print(x.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 85,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Dense<input_channels=84, output_channels=10, has_bias=True>\n",
      "[((16, 28), (16, 28)), ((8, 14), (8, 14)), ((4, 10), (4, 10)), ((2, 5), (2, 5))]\n",
      "[((16, 32), (16, 32)), ((16, 28), (16, 28)), ((4, 14), (4, 14)), ((4, 10), (4, 10))]\n"
     ]
    }
   ],
   "source": [
     "# NOTE(review): `layer`, `block`, and `pre_layer_area` are loop variables\n",
     "# leaked from the block-recompute cell above -- this shows only their\n",
     "# final-iteration values and fails if that cell has not been run\n",
     "print(layer)\n",
     "print(block)\n",
     "print(pre_layer_area)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "mindspore",
   "language": "python",
   "name": "mindspore"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
