{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Caffe Relay 算子测试"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "tags": [
     "remove-cell"
    ]
   },
   "outputs": [],
   "source": [
    "import set_env"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import logging\n",
    "import numpy as np\n",
    "\n",
    "from google.protobuf import text_format\n",
    "import caffe\n",
    "from caffe import layers as L, params as P\n",
    "from caffe.proto import caffe_pb2 as pb\n",
    "\n",
    "import tvm\n",
    "import tvm.testing\n",
    "from tvm import relay\n",
    "from tvm.contrib import graph_executor\n",
    "from tvm.contrib.download import download_testdata\n",
    "os.environ[\"GLOG_minloglevel\"] = \"2\"\n",
    "\n",
    "logging.basicConfig(level=logging.ERROR)\n",
    "\n",
    "def save_prototxt(n_netspec, f_path):\n",
    "    \"\"\"Generate .prototxt file according to caffe.NetSpec\"\"\"\n",
    "    s = n_netspec.to_proto()\n",
    "    with open(f_path, \"w\") as f:\n",
    "        f.write(str(s))\n",
    "\n",
    "\n",
    "def save_solver(solver_file, proto_file, blob_file):\n",
    "    \"\"\"Define a solver proto, you can change the configs.\"\"\"\n",
    "    blob_file_prefix = blob_file.split(\".caffemodel\")[0]\n",
    "    s = pb.SolverParameter()\n",
    "    s.train_net = proto_file\n",
    "    s.base_lr = 0.01\n",
    "    s.momentum = 0.9\n",
    "    s.weight_decay = 0.0005\n",
    "    s.lr_policy = \"inv\"\n",
    "    s.gamma = 0.0001\n",
    "    s.power = 0.75\n",
    "    s.display = 1\n",
    "    s.max_iter = 100000\n",
    "    s.snapshot = 100000\n",
    "    s.snapshot_prefix = blob_file_prefix\n",
    "\n",
    "    with open(solver_file, \"w\") as f:\n",
    "        f.write(str(s))\n",
    "\n",
    "\n",
    "def save_caffemodel(solver_file, blob_file):\n",
    "    \"\"\"Generate .caffemodel file.\"\"\"\n",
    "    solver = caffe.SGDSolver(solver_file)\n",
    "    solver.net.save(blob_file)\n",
    "\n",
    "def gen_model_files(n_netspec, proto_file, blob_file, solver_file):\n",
    "    save_prototxt(n_netspec, proto_file)\n",
    "    save_solver(solver_file, proto_file, blob_file)\n",
    "    save_caffemodel(solver_file, blob_file)\n",
    "\n",
    "def run_caffe(data, proto_file, blob_file):\n",
    "    \"\"\"Run caffe model by Caffe according to .caffemodel and .prototxt\"\"\"\n",
    "    net = caffe.Net(proto_file, blob_file, caffe.TEST)\n",
    "    if isinstance(data, (list, tuple)):\n",
    "        for idx, d in enumerate(data):\n",
    "            net.blobs[\"data\" + str(idx)].data[...] = d\n",
    "    else:\n",
    "        net.blobs[\"data\"].data[...] = data\n",
    "    out = net.forward()\n",
    "\n",
    "    caffe_output = []\n",
    "    for i in range(len(out.keys())):\n",
    "        if \"output\" + str(i) not in out.keys():\n",
    "            caffe_output.clear()\n",
    "            return list(out.values())\n",
    "        caffe_output.append(out[\"output\" + str(i)])\n",
    "    return caffe_output"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def siso_op(shape, func, *args, **kwargs):\n",
    "    \"\"\"Create single input and single output Caffe op\"\"\"\n",
    "    n = caffe.NetSpec()\n",
    "    n.data = L.Input(input_param={\"shape\": {\"dim\": list(shape)}})\n",
    "    n.output = func(n.data, *args, **kwargs)\n",
    "    return n\n",
    "\n",
    "def miso_op(shapes, func, *args, **kwargs):\n",
    "    \"\"\"Create multi input and single output Caffe op\"\"\"\n",
    "    n = caffe.NetSpec()\n",
    "    if not isinstance(shapes, (tuple, list)):\n",
    "        raise TypeError(f\"Need tuple or list but get {type(shapes)}\")\n",
    "    input_list = []\n",
    "    for idx, shape in enumerate(shapes):\n",
    "        n[\"data\" + str(idx)] = L.Input(input_param={\"shape\": {\"dim\": list(shape)}})\n",
    "        input_list.append(n[\"data\" + str(idx)])\n",
    "    n.output = func(*input_list, *args, **kwargs)\n",
    "    return n\n",
    "\n",
    "\n",
    "def simo_op(shape, func, *args, **kwargs):\n",
    "    \"\"\"Create single input and multi output Caffe op\"\"\"\n",
    "    n = caffe.NetSpec()\n",
    "    n.data = L.Input(input_param={\"shape\": {\"dim\": list(shape)}})\n",
    "    output_list = func(n.data, *args, **kwargs)\n",
    "    for idx, out in enumerate(output_list):\n",
    "        n[\"output\" + str(idx)] = out\n",
    "    return n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def creat_op(shapes, func_op, **kwargs):\n",
    "    shape_list = []\n",
    "    if isinstance(shapes, (list, tuple)):\n",
    "        n = miso_op(shapes, func_op, **kwargs)\n",
    "        for shape in shapes:\n",
    "            shape_list.extend(list(shape))\n",
    "    else:\n",
    "        output_num = 1\n",
    "        if \"ntop\" in kwargs:\n",
    "            output_num = kwargs[\"ntop\"]\n",
    "        if output_num == 1:\n",
    "            n = siso_op(shapes, func_op, **kwargs)\n",
    "        else:\n",
    "            n = simo_op(shapes, func_op, **kwargs)\n",
    "        shape_list = list(shapes)\n",
    "    return n, shape_list"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## caffe BatchNorm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING: Logging before InitGoogleLogging() is written to STDERR\n",
      "I0914 09:12:10.797545 1956768 solver.cpp:45] Initializing solver from parameters: \n",
      "train_net: \"./.temp/BatchNorm.prototxt\"\n",
      "base_lr: 0.01\n",
      "display: 1\n",
      "max_iter: 100000\n",
      "lr_policy: \"inv\"\n",
      "gamma: 0.0001\n",
      "power: 0.75\n",
      "momentum: 0.9\n",
      "weight_decay: 0.0005\n",
      "snapshot: 100000\n",
      "snapshot_prefix: \"./.temp/BatchNorm\"\n",
      "I0914 09:12:10.797695 1956768 solver.cpp:92] Creating training net from train_net file: ./.temp/BatchNorm.prototxt\n",
      "I0914 09:12:10.798242 1956768 net.cpp:53] Initializing net from parameters: \n",
      "state {\n",
      "  phase: TRAIN\n",
      "}\n",
      "layer {\n",
      "  name: \"data\"\n",
      "  type: \"Input\"\n",
      "  top: \"data\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 1\n",
      "      dim: 3\n",
      "      dim: 10\n",
      "      dim: 10\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"output\"\n",
      "  type: \"BatchNorm\"\n",
      "  bottom: \"data\"\n",
      "  top: \"output\"\n",
      "  batch_norm_param {\n",
      "    moving_average_fraction: 0.999\n",
      "    eps: 1e-05\n",
      "  }\n",
      "}\n",
      "I0914 09:12:10.798317 1956768 layer_factory.hpp:77] Creating layer data\n",
      "I0914 09:12:10.798336 1956768 net.cpp:86] Creating Layer data\n",
      "I0914 09:12:10.798341 1956768 net.cpp:382] data -> data\n",
      "I0914 09:12:10.798357 1956768 net.cpp:124] Setting up data\n",
      "I0914 09:12:10.798362 1956768 net.cpp:131] Top shape: 1 3 10 10 (300)\n",
      "I0914 09:12:10.798367 1956768 net.cpp:139] Memory required for data: 1200\n",
      "I0914 09:12:10.798370 1956768 layer_factory.hpp:77] Creating layer output\n",
      "I0914 09:12:10.798384 1956768 net.cpp:86] Creating Layer output\n",
      "I0914 09:12:10.798388 1956768 net.cpp:408] output <- data\n",
      "I0914 09:12:10.798391 1956768 net.cpp:382] output -> output\n",
      "I0914 09:12:10.798404 1956768 net.cpp:124] Setting up output\n",
      "I0914 09:12:10.798408 1956768 net.cpp:131] Top shape: 1 3 10 10 (300)\n",
      "I0914 09:12:10.798411 1956768 net.cpp:139] Memory required for data: 2400\n",
      "I0914 09:12:10.798420 1956768 net.cpp:202] output does not need backward computation.\n",
      "I0914 09:12:10.798424 1956768 net.cpp:202] data does not need backward computation.\n",
      "I0914 09:12:10.798426 1956768 net.cpp:244] This network produces output output\n",
      "I0914 09:12:10.798430 1956768 net.cpp:257] Network initialization done.\n",
      "I0914 09:12:10.798439 1956768 solver.cpp:57] Solver scaffolding done.\n",
      "W0914 09:12:10.799118 1956768 _caffe.cpp:139] DEPRECATION WARNING - deprecated use of Python interface\n",
      "W0914 09:12:10.799129 1956768 _caffe.cpp:140] Use this instead (with the named \"weights\" parameter):\n",
      "W0914 09:12:10.799131 1956768 _caffe.cpp:142] Net('./.temp/BatchNorm.prototxt', 1, weights='./.temp/BatchNorm.caffemodel')\n",
      "I0914 09:12:10.799191 1956768 net.cpp:53] Initializing net from parameters: \n",
      "state {\n",
      "  phase: TEST\n",
      "  level: 0\n",
      "}\n",
      "layer {\n",
      "  name: \"data\"\n",
      "  type: \"Input\"\n",
      "  top: \"data\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 1\n",
      "      dim: 3\n",
      "      dim: 10\n",
      "      dim: 10\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"output\"\n",
      "  type: \"BatchNorm\"\n",
      "  bottom: \"data\"\n",
      "  top: \"output\"\n",
      "  batch_norm_param {\n",
      "    moving_average_fraction: 0.999\n",
      "    eps: 1e-05\n",
      "  }\n",
      "}\n",
      "I0914 09:12:10.799225 1956768 layer_factory.hpp:77] Creating layer data\n",
      "I0914 09:12:10.799233 1956768 net.cpp:86] Creating Layer data\n",
      "I0914 09:12:10.799237 1956768 net.cpp:382] data -> data\n",
      "I0914 09:12:10.799245 1956768 net.cpp:124] Setting up data\n",
      "I0914 09:12:10.799249 1956768 net.cpp:131] Top shape: 1 3 10 10 (300)\n",
      "I0914 09:12:10.799254 1956768 net.cpp:139] Memory required for data: 1200\n",
      "I0914 09:12:10.799257 1956768 layer_factory.hpp:77] Creating layer output\n",
      "I0914 09:12:10.799263 1956768 net.cpp:86] Creating Layer output\n",
      "I0914 09:12:10.799266 1956768 net.cpp:408] output <- data\n",
      "I0914 09:12:10.799270 1956768 net.cpp:382] output -> output\n",
      "I0914 09:12:10.799283 1956768 net.cpp:124] Setting up output\n",
      "I0914 09:12:10.799285 1956768 net.cpp:131] Top shape: 1 3 10 10 (300)\n",
      "I0914 09:12:10.799289 1956768 net.cpp:139] Memory required for data: 2400\n",
      "I0914 09:12:10.799297 1956768 net.cpp:202] output does not need backward computation.\n",
      "I0914 09:12:10.799301 1956768 net.cpp:202] data does not need backward computation.\n",
      "I0914 09:12:10.799304 1956768 net.cpp:244] This network produces output output\n",
      "I0914 09:12:10.799309 1956768 net.cpp:257] Network initialization done.\n",
      "I0914 09:12:10.800050 1956768 upgrade_proto.cpp:79] Attempting to upgrade batch norm layers using deprecated params: ./.temp/BatchNorm.caffemodel\n",
      "I0914 09:12:10.800057 1956768 upgrade_proto.cpp:82] Successfully upgraded batch norm layers using deprecated params.\n"
     ]
    }
   ],
   "source": [
    "op_name = \"BatchNorm\"\n",
    "root_dir = \"./.temp\"\n",
    "proto_file = f\"{root_dir}/{op_name}.prototxt\"\n",
    "blob_file = f\"{root_dir}/{op_name}.caffemodel\"\n",
    "solver_file = f\"{root_dir}/{op_name}_solver.prototxt\"\n",
    "shape = (1, 3, 10, 10)\n",
    "n_netspec = siso_op(shape, L.BatchNorm, moving_average_fraction=0.999, eps=1e-5)\n",
    "# obtain the .caffemodel file and .prototxt file\n",
    "gen_model_files(n_netspec, proto_file, blob_file, solver_file)\n",
    "# run model in Caffe\n",
    "data = np.random.rand(*shape).astype(np.float32)\n",
    "caffe_out = run_caffe(data, proto_file, blob_file)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div class=\"highlight\" style=\"background: \"><pre style=\"line-height: 125%;\"><span></span><span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #AA22FF\">@main</span>(<span style=\"color: #AA22FF; font-weight: bold\">%</span>data: Tensor[(<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), float32] <span style=\"color: #AA22FF; font-weight: bold\">/*</span> ty<span style=\"color: #AA22FF; font-weight: bold\">=</span>Tensor[(<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), float32] <span style=\"color: #AA22FF; font-weight: bold\">*/</span>) <span style=\"color: #AA22FF; font-weight: bold\">-&gt;</span> Tensor[(<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), float32] {\n",
       "  <span style=\"color: #AA22FF; font-weight: bold\">%</span><span style=\"color: #008000\">0</span> <span style=\"color: #AA22FF; font-weight: bold\">=</span> multiply(<span style=\"color: #AA22FF; font-weight: bold\">%</span>data, meta[relay<span style=\"color: #AA22FF; font-weight: bold\">.</span>Constant][<span style=\"color: #008000\">0</span>] <span style=\"color: #AA22FF; font-weight: bold\">/*</span> ty<span style=\"color: #AA22FF; font-weight: bold\">=</span>Tensor[(<span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">1</span>), float32] <span style=\"color: #AA22FF; font-weight: bold\">*/</span>) <span style=\"color: #AA22FF; font-weight: bold\">/*</span> ty<span style=\"color: #AA22FF; font-weight: bold\">=</span>Tensor[(<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), float32] <span style=\"color: #AA22FF; font-weight: bold\">*/</span>;\n",
       "  add(<span style=\"color: #AA22FF; font-weight: bold\">%</span><span style=\"color: #008000\">0</span>, meta[relay<span style=\"color: #AA22FF; font-weight: bold\">.</span>Constant][<span style=\"color: #008000\">1</span>] <span style=\"color: #AA22FF; font-weight: bold\">/*</span> ty<span style=\"color: #AA22FF; font-weight: bold\">=</span>Tensor[(<span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">1</span>), float32] <span style=\"color: #AA22FF; font-weight: bold\">*/</span>) <span style=\"color: #AA22FF; font-weight: bold\">/*</span> ty<span style=\"color: #AA22FF; font-weight: bold\">=</span>Tensor[(<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), float32] <span style=\"color: #AA22FF; font-weight: bold\">*/</span>\n",
       "}\n",
       "</pre></div>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "init_net = pb.NetParameter()\n",
    "predict_net = pb.NetParameter()\n",
    "# load model\n",
    "with open(proto_file, \"r\") as f:\n",
    "    text_format.Merge(f.read(), predict_net)\n",
    "# load blob\n",
    "with open(blob_file, \"rb\") as f:\n",
    "    init_net.ParseFromString(f.read())\n",
    "shape_dict = {\"data\": shape}\n",
    "dtype_dict = {\"data\": \"float32\"}\n",
    "mod, params = relay.frontend.from_caffe(init_net, predict_net, shape_dict, dtype_dict)\n",
    "with tvm.transform.PassContext(opt_level=3):\n",
    "    mod = relay.quantize.prerequisite_optimize(mod, params)\n",
    "mod.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## caffe concat"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(0.5722476838666538,\n",
       " array([[[[0.61123624, 0.37588209, 0.02169732, 0.18100491, 0.95849767,\n",
       "           0.02395107, 0.20080887, 0.33766822, 0.11885211, 0.86539518],\n",
       "          [0.10359814, 0.3886911 , 0.65470521, 0.05712175, 0.71074179,\n",
       "           0.42249166, 0.37142238, 0.65421255, 0.99183236, 0.9922702 ],\n",
       "          [0.11899492, 0.46457062, 0.89906396, 0.33470963, 0.48111082,\n",
       "           0.86251228, 0.52144183, 0.91341972, 0.77010121, 0.49809878],\n",
       "          [0.89288594, 0.59797711, 0.41111909, 0.37215822, 0.82308275,\n",
       "           0.43773541, 0.48900654, 0.09655104, 0.6752475 , 0.73714895],\n",
       "          [0.94505647, 0.751491  , 0.79973422, 0.49016049, 0.98444054,\n",
       "           0.74379714, 0.10746913, 0.25731962, 0.25730886, 0.74203572],\n",
       "          [0.31960305, 0.73112817, 0.87677291, 0.15800022, 0.80400933,\n",
       "           0.44788138, 0.9895867 , 0.42715959, 0.71428694, 0.16113941],\n",
       "          [0.1914514 , 0.75825183, 0.50116843, 0.65810231, 0.75718552,\n",
       "           0.53124754, 0.4343793 , 0.32263688, 0.65770697, 0.61356753],\n",
       "          [0.19378037, 0.6395654 , 0.94830926, 0.37040329, 0.43761136,\n",
       "           0.79517072, 0.49994165, 0.87566226, 0.76793298, 0.08770912],\n",
       "          [0.24788341, 0.76420478, 0.69379041, 0.31979159, 0.38141878,\n",
       "           0.87133286, 0.48002551, 0.99319249, 0.92158005, 0.41340018],\n",
       "          [0.76364848, 0.14923229, 0.58972456, 0.6497025 , 0.72261034,\n",
       "           0.46587944, 0.65899584, 0.06284198, 0.10116845, 0.69050762]],\n",
       " \n",
       "         [[0.55086051, 0.41049861, 0.72613161, 0.10209068, 0.77561294,\n",
       "           0.60741189, 0.0990884 , 0.97687032, 0.48856226, 0.65001529],\n",
       "          [0.42668348, 0.85970584, 0.9009646 , 0.16837185, 0.07728554,\n",
       "           0.49002088, 0.14818526, 0.95035504, 0.87802133, 0.91886464],\n",
       "          [0.18567169, 0.82489557, 0.0105433 , 0.08790348, 0.00896896,\n",
       "           0.93911221, 0.69070265, 0.88624885, 0.01078138, 0.78754388],\n",
       "          [0.9911567 , 0.3502372 , 0.86614406, 0.69255045, 0.10446331,\n",
       "           0.99044252, 0.44933799, 0.89952532, 0.60757778, 0.81763927],\n",
       "          [0.60009809, 0.65632015, 0.2662486 , 0.56157217, 0.82138236,\n",
       "           0.1753327 , 0.01276561, 0.01786617, 0.58354147, 0.50236365],\n",
       "          [0.15670643, 0.03074663, 0.57945941, 0.48933184, 0.47575001,\n",
       "           0.2786405 , 0.30590399, 0.15175435, 0.95480545, 0.60934913],\n",
       "          [0.62542567, 0.36184765, 0.48864965, 0.78854085, 0.47227953,\n",
       "           0.20096491, 0.40530239, 0.66738742, 0.7947208 , 0.97882672],\n",
       "          [0.71914019, 0.28877839, 0.77869619, 0.98258026, 0.75842418,\n",
       "           0.27776089, 0.61026718, 0.11026115, 0.35014719, 0.85779448],\n",
       "          [0.43332412, 0.13963806, 0.22700522, 0.2184799 , 0.95821739,\n",
       "           0.60884812, 0.41895097, 0.39532951, 0.94120575, 0.88304259],\n",
       "          [0.36188015, 0.71448186, 0.12941785, 0.59089551, 0.3402902 ,\n",
       "           0.1287766 , 0.92626208, 0.31594477, 0.35793065, 0.74701252]]]]))"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "np.random.rand(), np.random.rand(1, 2, 10, 10)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _test_concat(shape_list, axis=1, op_name=\"concat\",):\n",
    "    proto_file = f\"{root_dir}/{op_name}.prototxt\"\n",
    "    blob_file = f\"{root_dir}/{op_name}.caffemodel\"\n",
    "    solver_file = f\"{root_dir}/{op_name}_solver.prototxt\"\n",
    "    n_netspec = miso_op(shape_list, L.Concat, axis=axis)\n",
    "    # obtain the .caffemodel file and .prototxt file\n",
    "    gen_model_files(n_netspec, proto_file, blob_file, solver_file)\n",
    "    # run model in Caffe\n",
    "    data = [np.random.rand(*shape).astype(np.float32) for shape in shape_list]\n",
    "    caffe_out = run_caffe(data, proto_file, blob_file)\n",
    "    init_net = pb.NetParameter()\n",
    "    predict_net = pb.NetParameter()\n",
    "    # load model\n",
    "    with open(proto_file, \"r\") as f:\n",
    "        text_format.Merge(f.read(), predict_net)\n",
    "    # load blob\n",
    "    with open(blob_file, \"rb\") as f:\n",
    "        init_net.ParseFromString(f.read())\n",
    "    shape_dict = [{f\"data_{k}\": shape} for k, shape in enumerate(shape_list)]\n",
    "    dtype_dict = [{f\"data_{k}\": \"float32\"} for k, shape in enumerate(shape_list)]\n",
    "    mod, params = relay.frontend.from_caffe(init_net, predict_net, shape_dict, dtype_dict)\n",
    "    return mod, params"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "I0914 09:12:11.276784 1956768 solver.cpp:45] Initializing solver from parameters: \n",
      "train_net: \"./.temp/concat.prototxt\"\n",
      "base_lr: 0.01\n",
      "display: 1\n",
      "max_iter: 100000\n",
      "lr_policy: \"inv\"\n",
      "gamma: 0.0001\n",
      "power: 0.75\n",
      "momentum: 0.9\n",
      "weight_decay: 0.0005\n",
      "snapshot: 100000\n",
      "snapshot_prefix: \"./.temp/concat\"\n",
      "I0914 09:12:11.276893 1956768 solver.cpp:92] Creating training net from train_net file: ./.temp/concat.prototxt\n",
      "I0914 09:12:11.276965 1956768 net.cpp:53] Initializing net from parameters: \n",
      "state {\n",
      "  phase: TRAIN\n",
      "}\n",
      "layer {\n",
      "  name: \"data0\"\n",
      "  type: \"Input\"\n",
      "  top: \"data0\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 1\n",
      "      dim: 3\n",
      "      dim: 10\n",
      "      dim: 10\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"data1\"\n",
      "  type: \"Input\"\n",
      "  top: \"data1\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 1\n",
      "      dim: 2\n",
      "      dim: 10\n",
      "      dim: 10\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"output\"\n",
      "  type: \"Concat\"\n",
      "  bottom: \"data0\"\n",
      "  bottom: \"data1\"\n",
      "  top: \"output\"\n",
      "  concat_param {\n",
      "    axis: 1\n",
      "  }\n",
      "}\n",
      "I0914 09:12:11.277004 1956768 layer_factory.hpp:77] Creating layer data0\n",
      "I0914 09:12:11.277014 1956768 net.cpp:86] Creating Layer data0\n",
      "I0914 09:12:11.277019 1956768 net.cpp:382] data0 -> data0\n",
      "I0914 09:12:11.277031 1956768 net.cpp:124] Setting up data0\n",
      "I0914 09:12:11.277035 1956768 net.cpp:131] Top shape: 1 3 10 10 (300)\n",
      "I0914 09:12:11.277040 1956768 net.cpp:139] Memory required for data: 1200\n",
      "I0914 09:12:11.277045 1956768 layer_factory.hpp:77] Creating layer data1\n",
      "I0914 09:12:11.277050 1956768 net.cpp:86] Creating Layer data1\n",
      "I0914 09:12:11.277055 1956768 net.cpp:382] data1 -> data1\n",
      "I0914 09:12:11.277061 1956768 net.cpp:124] Setting up data1\n",
      "I0914 09:12:11.277065 1956768 net.cpp:131] Top shape: 1 2 10 10 (200)\n",
      "I0914 09:12:11.277068 1956768 net.cpp:139] Memory required for data: 2000\n",
      "I0914 09:12:11.277072 1956768 layer_factory.hpp:77] Creating layer output\n",
      "I0914 09:12:11.277088 1956768 net.cpp:86] Creating Layer output\n",
      "I0914 09:12:11.277092 1956768 net.cpp:408] output <- data0\n",
      "I0914 09:12:11.277097 1956768 net.cpp:408] output <- data1\n",
      "I0914 09:12:11.277102 1956768 net.cpp:382] output -> output\n",
      "I0914 09:12:11.277109 1956768 net.cpp:124] Setting up output\n",
      "I0914 09:12:11.277113 1956768 net.cpp:131] Top shape: 1 5 10 10 (500)\n",
      "I0914 09:12:11.277117 1956768 net.cpp:139] Memory required for data: 4000\n",
      "I0914 09:12:11.277120 1956768 net.cpp:202] output does not need backward computation.\n",
      "I0914 09:12:11.277124 1956768 net.cpp:202] data1 does not need backward computation.\n",
      "I0914 09:12:11.277127 1956768 net.cpp:202] data0 does not need backward computation.\n",
      "I0914 09:12:11.277130 1956768 net.cpp:244] This network produces output output\n",
      "I0914 09:12:11.277135 1956768 net.cpp:257] Network initialization done.\n",
      "I0914 09:12:11.277148 1956768 solver.cpp:57] Solver scaffolding done.\n",
      "W0914 09:12:11.277402 1956768 _caffe.cpp:139] DEPRECATION WARNING - deprecated use of Python interface\n",
      "W0914 09:12:11.277412 1956768 _caffe.cpp:140] Use this instead (with the named \"weights\" parameter):\n",
      "W0914 09:12:11.277415 1956768 _caffe.cpp:142] Net('./.temp/concat.prototxt', 1, weights='./.temp/concat.caffemodel')\n",
      "I0914 09:12:11.277477 1956768 net.cpp:53] Initializing net from parameters: \n",
      "state {\n",
      "  phase: TEST\n",
      "  level: 0\n",
      "}\n",
      "layer {\n",
      "  name: \"data0\"\n",
      "  type: \"Input\"\n",
      "  top: \"data0\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 1\n",
      "      dim: 3\n",
      "      dim: 10\n",
      "      dim: 10\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"data1\"\n",
      "  type: \"Input\"\n",
      "  top: \"data1\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 1\n",
      "      dim: 2\n",
      "      dim: 10\n",
      "      dim: 10\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"output\"\n",
      "  type: \"Concat\"\n",
      "  bottom: \"data0\"\n",
      "  bottom: \"data1\"\n",
      "  top: \"output\"\n",
      "  concat_param {\n",
      "    axis: 1\n",
      "  }\n",
      "}\n",
      "I0914 09:12:11.277510 1956768 layer_factory.hpp:77] Creating layer data0\n",
      "I0914 09:12:11.277516 1956768 net.cpp:86] Creating Layer data0\n",
      "I0914 09:12:11.277521 1956768 net.cpp:382] data0 -> data0\n",
      "I0914 09:12:11.277529 1956768 net.cpp:124] Setting up data0\n",
      "I0914 09:12:11.277534 1956768 net.cpp:131] Top shape: 1 3 10 10 (300)\n",
      "I0914 09:12:11.277537 1956768 net.cpp:139] Memory required for data: 1200\n",
      "I0914 09:12:11.277541 1956768 layer_factory.hpp:77] Creating layer data1\n",
      "I0914 09:12:11.277546 1956768 net.cpp:86] Creating Layer data1\n",
      "I0914 09:12:11.277550 1956768 net.cpp:382] data1 -> data1\n",
      "I0914 09:12:11.277557 1956768 net.cpp:124] Setting up data1\n",
      "I0914 09:12:11.277560 1956768 net.cpp:131] Top shape: 1 2 10 10 (200)\n",
      "I0914 09:12:11.277565 1956768 net.cpp:139] Memory required for data: 2000\n",
      "I0914 09:12:11.277567 1956768 layer_factory.hpp:77] Creating layer output\n",
      "I0914 09:12:11.277575 1956768 net.cpp:86] Creating Layer output\n",
      "I0914 09:12:11.277577 1956768 net.cpp:408] output <- data0\n",
      "I0914 09:12:11.277581 1956768 net.cpp:408] output <- data1\n",
      "I0914 09:12:11.277586 1956768 net.cpp:382] output -> output\n",
      "I0914 09:12:11.277592 1956768 net.cpp:124] Setting up output\n",
      "I0914 09:12:11.277596 1956768 net.cpp:131] Top shape: 1 5 10 10 (500)\n",
      "I0914 09:12:11.277599 1956768 net.cpp:139] Memory required for data: 4000\n",
      "I0914 09:12:11.277603 1956768 net.cpp:202] output does not need backward computation.\n",
      "I0914 09:12:11.277606 1956768 net.cpp:202] data1 does not need backward computation.\n",
      "I0914 09:12:11.277609 1956768 net.cpp:202] data0 does not need backward computation.\n",
      "I0914 09:12:11.277612 1956768 net.cpp:244] This network produces output output\n",
      "I0914 09:12:11.277617 1956768 net.cpp:257] Network initialization done.\n"
     ]
    }
   ],
   "source": [
    "mod, params = _test_concat([(1, 3, 10, 10), (1, 2, 10, 10)], axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div class=\"highlight\" style=\"background: \"><pre style=\"line-height: 125%;\"><span></span><span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #AA22FF\">@main</span>(<span style=\"color: #AA22FF; font-weight: bold\">%</span>data0, <span style=\"color: #AA22FF; font-weight: bold\">%</span>data1) {\n",
       "  <span style=\"color: #AA22FF; font-weight: bold\">%</span><span style=\"color: #008000\">0</span> <span style=\"color: #AA22FF; font-weight: bold\">=</span> (<span style=\"color: #AA22FF; font-weight: bold\">%</span>data0, <span style=\"color: #AA22FF; font-weight: bold\">%</span>data1);\n",
       "  concatenate(<span style=\"color: #AA22FF; font-weight: bold\">%</span><span style=\"color: #008000\">0</span>, axis<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #008000\">1</span>)\n",
       "}\n",
       "</pre></div>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "mod.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "I0914 09:12:11.297618 1956768 solver.cpp:45] Initializing solver from parameters: \n",
      "train_net: \"./.temp/concat.prototxt\"\n",
      "base_lr: 0.01\n",
      "display: 1\n",
      "max_iter: 100000\n",
      "lr_policy: \"inv\"\n",
      "gamma: 0.0001\n",
      "power: 0.75\n",
      "momentum: 0.9\n",
      "weight_decay: 0.0005\n",
      "snapshot: 100000\n",
      "snapshot_prefix: \"./.temp/concat\"\n",
      "I0914 09:12:11.297690 1956768 solver.cpp:92] Creating training net from train_net file: ./.temp/concat.prototxt\n",
      "I0914 09:12:11.297744 1956768 net.cpp:53] Initializing net from parameters: \n",
      "state {\n",
      "  phase: TRAIN\n",
      "}\n",
      "layer {\n",
      "  name: \"data0\"\n",
      "  type: \"Input\"\n",
      "  top: \"data0\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 3\n",
      "      dim: 10\n",
      "      dim: 10\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"data1\"\n",
      "  type: \"Input\"\n",
      "  top: \"data1\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 2\n",
      "      dim: 10\n",
      "      dim: 10\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"output\"\n",
      "  type: \"Concat\"\n",
      "  bottom: \"data0\"\n",
      "  bottom: \"data1\"\n",
      "  top: \"output\"\n",
      "  concat_param {\n",
      "    axis: 0\n",
      "  }\n",
      "}\n",
      "I0914 09:12:11.297775 1956768 layer_factory.hpp:77] Creating layer data0\n",
      "I0914 09:12:11.297782 1956768 net.cpp:86] Creating Layer data0\n",
      "I0914 09:12:11.297786 1956768 net.cpp:382] data0 -> data0\n",
      "I0914 09:12:11.297796 1956768 net.cpp:124] Setting up data0\n",
      "I0914 09:12:11.297799 1956768 net.cpp:131] Top shape: 3 10 10 (300)\n",
      "I0914 09:12:11.297804 1956768 net.cpp:139] Memory required for data: 1200\n",
      "I0914 09:12:11.297807 1956768 layer_factory.hpp:77] Creating layer data1\n",
      "I0914 09:12:11.297812 1956768 net.cpp:86] Creating Layer data1\n",
      "I0914 09:12:11.297816 1956768 net.cpp:382] data1 -> data1\n",
      "I0914 09:12:11.297822 1956768 net.cpp:124] Setting up data1\n",
      "I0914 09:12:11.297825 1956768 net.cpp:131] Top shape: 2 10 10 (200)\n",
      "I0914 09:12:11.297829 1956768 net.cpp:139] Memory required for data: 2000\n",
      "I0914 09:12:11.297832 1956768 layer_factory.hpp:77] Creating layer output\n",
      "I0914 09:12:11.297837 1956768 net.cpp:86] Creating Layer output\n",
      "I0914 09:12:11.297842 1956768 net.cpp:408] output <- data0\n",
      "I0914 09:12:11.297844 1956768 net.cpp:408] output <- data1\n",
      "I0914 09:12:11.297849 1956768 net.cpp:382] output -> output\n",
      "I0914 09:12:11.297855 1956768 net.cpp:124] Setting up output\n",
      "I0914 09:12:11.297858 1956768 net.cpp:131] Top shape: 5 10 10 (500)\n",
      "I0914 09:12:11.297863 1956768 net.cpp:139] Memory required for data: 4000\n",
      "I0914 09:12:11.297865 1956768 net.cpp:202] output does not need backward computation.\n",
      "I0914 09:12:11.297869 1956768 net.cpp:202] data1 does not need backward computation.\n",
      "I0914 09:12:11.297873 1956768 net.cpp:202] data0 does not need backward computation.\n",
      "I0914 09:12:11.297875 1956768 net.cpp:244] This network produces output output\n",
      "I0914 09:12:11.297880 1956768 net.cpp:257] Network initialization done.\n",
      "I0914 09:12:11.297889 1956768 solver.cpp:57] Solver scaffolding done.\n",
      "W0914 09:12:11.298053 1956768 _caffe.cpp:139] DEPRECATION WARNING - deprecated use of Python interface\n",
      "W0914 09:12:11.298061 1956768 _caffe.cpp:140] Use this instead (with the named \"weights\" parameter):\n",
      "W0914 09:12:11.298064 1956768 _caffe.cpp:142] Net('./.temp/concat.prototxt', 1, weights='./.temp/concat.caffemodel')\n",
      "I0914 09:12:11.298123 1956768 net.cpp:53] Initializing net from parameters: \n",
      "state {\n",
      "  phase: TEST\n",
      "  level: 0\n",
      "}\n",
      "layer {\n",
      "  name: \"data0\"\n",
      "  type: \"Input\"\n",
      "  top: \"data0\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 3\n",
      "      dim: 10\n",
      "      dim: 10\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"data1\"\n",
      "  type: \"Input\"\n",
      "  top: \"data1\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 2\n",
      "      dim: 10\n",
      "      dim: 10\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"output\"\n",
      "  type: \"Concat\"\n",
      "  bottom: \"data0\"\n",
      "  bottom: \"data1\"\n",
      "  top: \"output\"\n",
      "  concat_param {\n",
      "    axis: 0\n",
      "  }\n",
      "}\n",
      "I0914 09:12:11.298153 1956768 layer_factory.hpp:77] Creating layer data0\n",
      "I0914 09:12:11.298161 1956768 net.cpp:86] Creating Layer data0\n",
      "I0914 09:12:11.298164 1956768 net.cpp:382] data0 -> data0\n",
      "I0914 09:12:11.298172 1956768 net.cpp:124] Setting up data0\n",
      "I0914 09:12:11.298177 1956768 net.cpp:131] Top shape: 3 10 10 (300)\n",
      "I0914 09:12:11.298180 1956768 net.cpp:139] Memory required for data: 1200\n",
      "I0914 09:12:11.298184 1956768 layer_factory.hpp:77] Creating layer data1\n",
      "I0914 09:12:11.298188 1956768 net.cpp:86] Creating Layer data1\n",
      "I0914 09:12:11.298192 1956768 net.cpp:382] data1 -> data1\n",
      "I0914 09:12:11.298198 1956768 net.cpp:124] Setting up data1\n",
      "I0914 09:12:11.298202 1956768 net.cpp:131] Top shape: 2 10 10 (200)\n",
      "I0914 09:12:11.298205 1956768 net.cpp:139] Memory required for data: 2000\n",
      "I0914 09:12:11.298208 1956768 layer_factory.hpp:77] Creating layer output\n",
      "I0914 09:12:11.298215 1956768 net.cpp:86] Creating Layer output\n",
      "I0914 09:12:11.298218 1956768 net.cpp:408] output <- data0\n",
      "I0914 09:12:11.298223 1956768 net.cpp:408] output <- data1\n",
      "I0914 09:12:11.298228 1956768 net.cpp:382] output -> output\n",
      "I0914 09:12:11.298233 1956768 net.cpp:124] Setting up output\n",
      "I0914 09:12:11.298236 1956768 net.cpp:131] Top shape: 5 10 10 (500)\n",
      "I0914 09:12:11.298240 1956768 net.cpp:139] Memory required for data: 4000\n",
      "I0914 09:12:11.298243 1956768 net.cpp:202] output does not need backward computation.\n",
      "I0914 09:12:11.298246 1956768 net.cpp:202] data1 does not need backward computation.\n",
      "I0914 09:12:11.298249 1956768 net.cpp:202] data0 does not need backward computation.\n",
      "I0914 09:12:11.298252 1956768 net.cpp:244] This network produces output output\n",
      "I0914 09:12:11.298256 1956768 net.cpp:257] Network initialization done.\n"
     ]
    }
   ],
   "source": [
    "mod, params = _test_concat([(3, 10, 10), (2, 10, 10)], axis=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div class=\"highlight\" style=\"background: \"><pre style=\"line-height: 125%;\"><span></span><span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #AA22FF\">@main</span>(<span style=\"color: #AA22FF; font-weight: bold\">%</span>data0, <span style=\"color: #AA22FF; font-weight: bold\">%</span>data1) {\n",
       "  <span style=\"color: #AA22FF; font-weight: bold\">%</span><span style=\"color: #008000\">0</span> <span style=\"color: #AA22FF; font-weight: bold\">=</span> (<span style=\"color: #AA22FF; font-weight: bold\">%</span>data0, <span style=\"color: #AA22FF; font-weight: bold\">%</span>data1);\n",
       "  concatenate(<span style=\"color: #AA22FF; font-weight: bold\">%</span><span style=\"color: #008000\">0</span>)\n",
       "}\n",
       "</pre></div>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "mod.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## caffe Convolution"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _test_conv2d(shape_list,  op_name=\"conv2d\", **kwargs):\n",
    "    proto_file = f\"{root_dir}/{op_name}.prototxt\"\n",
    "    blob_file = f\"{root_dir}/{op_name}.caffemodel\"\n",
    "    solver_file = f\"{root_dir}/{op_name}_solver.prototxt\"\n",
    "    n_netspec = miso_op(shape_list, L.Convolution, **kwargs)\n",
    "    # obtain the .caffemodel file and .prototxt file\n",
    "    gen_model_files(n_netspec, proto_file, blob_file, solver_file)\n",
    "    # run model in Caffe\n",
    "    data = [np.random.rand(*shape).astype(np.float32) for shape in shape_list]\n",
    "    caffe_out = run_caffe(data, proto_file, blob_file)\n",
    "    init_net = pb.NetParameter()\n",
    "    predict_net = pb.NetParameter()\n",
    "    # load model\n",
    "    with open(proto_file, \"r\") as f:\n",
    "        text_format.Merge(f.read(), predict_net)\n",
    "    # load blob\n",
    "    with open(blob_file, \"rb\") as f:\n",
    "        init_net.ParseFromString(f.read())\n",
    "    shape_dict = [{f\"data_{k}\": shape} for k, shape in enumerate(shape_list)]\n",
    "    dtype_dict = [{f\"data_{k}\": \"float32\"} for k, _ in enumerate(shape_list)]\n",
    "    mod, params = relay.frontend.from_caffe(init_net, predict_net, shape_dict, dtype_dict)\n",
    "    return mod, params"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "I0914 09:12:11.327899 1956768 solver.cpp:45] Initializing solver from parameters: \n",
      "train_net: \"./.temp/conv2d.prototxt\"\n",
      "base_lr: 0.01\n",
      "display: 1\n",
      "max_iter: 100000\n",
      "lr_policy: \"inv\"\n",
      "gamma: 0.0001\n",
      "power: 0.75\n",
      "momentum: 0.9\n",
      "weight_decay: 0.0005\n",
      "snapshot: 100000\n",
      "snapshot_prefix: \"./.temp/conv2d\"\n",
      "I0914 09:12:11.327972 1956768 solver.cpp:92] Creating training net from train_net file: ./.temp/conv2d.prototxt\n",
      "I0914 09:12:11.328034 1956768 net.cpp:53] Initializing net from parameters: \n",
      "state {\n",
      "  phase: TRAIN\n",
      "}\n",
      "layer {\n",
      "  name: \"data0\"\n",
      "  type: \"Input\"\n",
      "  top: \"data0\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 1\n",
      "      dim: 3\n",
      "      dim: 10\n",
      "      dim: 10\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"output\"\n",
      "  type: \"Convolution\"\n",
      "  bottom: \"data0\"\n",
      "  top: \"output\"\n",
      "  convolution_param {\n",
      "    num_output: 20\n",
      "    bias_term: true\n",
      "    pad: 0\n",
      "    kernel_size: 3\n",
      "    stride: 2\n",
      "    weight_filler {\n",
      "      type: \"xavier\"\n",
      "    }\n",
      "    bias_filler {\n",
      "      type: \"xavier\"\n",
      "    }\n",
      "    dilation: 1\n",
      "  }\n",
      "}\n",
      "I0914 09:12:11.328068 1956768 layer_factory.hpp:77] Creating layer data0\n",
      "I0914 09:12:11.328075 1956768 net.cpp:86] Creating Layer data0\n",
      "I0914 09:12:11.328079 1956768 net.cpp:382] data0 -> data0\n",
      "I0914 09:12:11.328088 1956768 net.cpp:124] Setting up data0\n",
      "I0914 09:12:11.328092 1956768 net.cpp:131] Top shape: 1 3 10 10 (300)\n",
      "I0914 09:12:11.328097 1956768 net.cpp:139] Memory required for data: 1200\n",
      "I0914 09:12:11.328100 1956768 layer_factory.hpp:77] Creating layer output\n",
      "I0914 09:12:11.328121 1956768 net.cpp:86] Creating Layer output\n",
      "I0914 09:12:11.328126 1956768 net.cpp:408] output <- data0\n",
      "I0914 09:12:11.328131 1956768 net.cpp:382] output -> output\n",
      "I0914 09:12:11.328189 1956768 net.cpp:124] Setting up output\n",
      "I0914 09:12:11.328195 1956768 net.cpp:131] Top shape: 1 20 4 4 (320)\n",
      "I0914 09:12:11.328200 1956768 net.cpp:139] Memory required for data: 2480\n",
      "I0914 09:12:11.328207 1956768 net.cpp:202] output does not need backward computation.\n",
      "I0914 09:12:11.328210 1956768 net.cpp:202] data0 does not need backward computation.\n",
      "I0914 09:12:11.328213 1956768 net.cpp:244] This network produces output output\n",
      "I0914 09:12:11.328218 1956768 net.cpp:257] Network initialization done.\n",
      "I0914 09:12:11.328227 1956768 solver.cpp:57] Solver scaffolding done.\n",
      "W0914 09:12:11.328648 1956768 _caffe.cpp:139] DEPRECATION WARNING - deprecated use of Python interface\n",
      "W0914 09:12:11.328657 1956768 _caffe.cpp:140] Use this instead (with the named \"weights\" parameter):\n",
      "W0914 09:12:11.328660 1956768 _caffe.cpp:142] Net('./.temp/conv2d.prototxt', 1, weights='./.temp/conv2d.caffemodel')\n",
      "I0914 09:12:11.328723 1956768 net.cpp:53] Initializing net from parameters: \n",
      "state {\n",
      "  phase: TEST\n",
      "  level: 0\n",
      "}\n",
      "layer {\n",
      "  name: \"data0\"\n",
      "  type: \"Input\"\n",
      "  top: \"data0\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 1\n",
      "      dim: 3\n",
      "      dim: 10\n",
      "      dim: 10\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"output\"\n",
      "  type: \"Convolution\"\n",
      "  bottom: \"data0\"\n",
      "  top: \"output\"\n",
      "  convolution_param {\n",
      "    num_output: 20\n",
      "    bias_term: true\n",
      "    pad: 0\n",
      "    kernel_size: 3\n",
      "    stride: 2\n",
      "    weight_filler {\n",
      "      type: \"xavier\"\n",
      "    }\n",
      "    bias_filler {\n",
      "      type: \"xavier\"\n",
      "    }\n",
      "    dilation: 1\n",
      "  }\n",
      "}\n",
      "I0914 09:12:11.328754 1956768 layer_factory.hpp:77] Creating layer data0\n",
      "I0914 09:12:11.328761 1956768 net.cpp:86] Creating Layer data0\n",
      "I0914 09:12:11.328765 1956768 net.cpp:382] data0 -> data0\n",
      "I0914 09:12:11.328773 1956768 net.cpp:124] Setting up data0\n",
      "I0914 09:12:11.328778 1956768 net.cpp:131] Top shape: 1 3 10 10 (300)\n",
      "I0914 09:12:11.328781 1956768 net.cpp:139] Memory required for data: 1200\n",
      "I0914 09:12:11.328785 1956768 layer_factory.hpp:77] Creating layer output\n",
      "I0914 09:12:11.328792 1956768 net.cpp:86] Creating Layer output\n",
      "I0914 09:12:11.328795 1956768 net.cpp:408] output <- data0\n",
      "I0914 09:12:11.328800 1956768 net.cpp:382] output -> output\n",
      "I0914 09:12:11.328820 1956768 net.cpp:124] Setting up output\n",
      "I0914 09:12:11.328824 1956768 net.cpp:131] Top shape: 1 20 4 4 (320)\n",
      "I0914 09:12:11.328828 1956768 net.cpp:139] Memory required for data: 2480\n",
      "I0914 09:12:11.328835 1956768 net.cpp:202] output does not need backward computation.\n",
      "I0914 09:12:11.328838 1956768 net.cpp:202] data0 does not need backward computation.\n",
      "I0914 09:12:11.328841 1956768 net.cpp:244] This network produces output output\n",
      "I0914 09:12:11.328845 1956768 net.cpp:257] Network initialization done.\n"
     ]
    }
   ],
   "source": [
    "shape_list = [(1, 3, 10, 10)]\n",
    "mod, params = _test_conv2d(\n",
    "    shape_list,\n",
    "    num_output=20,\n",
    "    bias_term=True,\n",
    "    pad=0,\n",
    "    kernel_size=3,\n",
    "    stride=2,\n",
    "    dilation=1,\n",
    "    weight_filler=dict(type=\"xavier\"),\n",
    "    bias_filler=dict(type=\"xavier\"),\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div class=\"highlight\" style=\"background: \"><pre style=\"line-height: 125%;\"><span></span><span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #AA22FF\">@main</span>(<span style=\"color: #AA22FF; font-weight: bold\">%</span>data0, <span style=\"color: #AA22FF; font-weight: bold\">%</span>v_param_1: Tensor[(<span style=\"color: #008000\">20</span>, <span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">3</span>), float32], <span style=\"color: #AA22FF; font-weight: bold\">%</span>v_param_2: Tensor[(<span style=\"color: #008000\">20</span>), float32]) {\n",
       "  <span style=\"color: #AA22FF; font-weight: bold\">%</span><span style=\"color: #008000\">0</span> <span style=\"color: #AA22FF; font-weight: bold\">=</span> nn<span style=\"color: #AA22FF; font-weight: bold\">.</span>conv2d(<span style=\"color: #AA22FF; font-weight: bold\">%</span>data0, <span style=\"color: #AA22FF; font-weight: bold\">%</span>v_param_1, strides<span style=\"color: #AA22FF; font-weight: bold\">=</span>[<span style=\"color: #008000\">2</span>, <span style=\"color: #008000\">2</span>], padding<span style=\"color: #AA22FF; font-weight: bold\">=</span>[<span style=\"color: #008000\">0</span>, <span style=\"color: #008000\">0</span>, <span style=\"color: #008000\">0</span>, <span style=\"color: #008000\">0</span>], channels<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #008000\">20</span>, kernel_size<span style=\"color: #AA22FF; font-weight: bold\">=</span>[<span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">3</span>]);\n",
       "  nn<span style=\"color: #AA22FF; font-weight: bold\">.</span>bias_add(<span style=\"color: #AA22FF; font-weight: bold\">%</span><span style=\"color: #008000\">0</span>, <span style=\"color: #AA22FF; font-weight: bold\">%</span>v_param_2)\n",
       "}\n",
       "</pre></div>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "mod.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "I0914 09:12:11.350931 1956768 solver.cpp:45] Initializing solver from parameters: \n",
      "train_net: \"./.temp/conv2d.prototxt\"\n",
      "base_lr: 0.01\n",
      "display: 1\n",
      "max_iter: 100000\n",
      "lr_policy: \"inv\"\n",
      "gamma: 0.0001\n",
      "power: 0.75\n",
      "momentum: 0.9\n",
      "weight_decay: 0.0005\n",
      "snapshot: 100000\n",
      "snapshot_prefix: \"./.temp/conv2d\"\n",
      "I0914 09:12:11.351027 1956768 solver.cpp:92] Creating training net from train_net file: ./.temp/conv2d.prototxt\n",
      "I0914 09:12:11.351083 1956768 net.cpp:53] Initializing net from parameters: \n",
      "state {\n",
      "  phase: TRAIN\n",
      "}\n",
      "layer {\n",
      "  name: \"data0\"\n",
      "  type: \"Input\"\n",
      "  top: \"data0\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 1\n",
      "      dim: 3\n",
      "      dim: 10\n",
      "      dim: 10\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"output\"\n",
      "  type: \"Convolution\"\n",
      "  bottom: \"data0\"\n",
      "  top: \"output\"\n",
      "  convolution_param {\n",
      "    num_output: 20\n",
      "    bias_term: false\n",
      "    pad: 1\n",
      "    pad: 2\n",
      "    kernel_size: 3\n",
      "    stride: 2\n",
      "    weight_filler {\n",
      "      type: \"xavier\"\n",
      "    }\n",
      "    bias_filler {\n",
      "      type: \"xavier\"\n",
      "    }\n",
      "    dilation: 1\n",
      "  }\n",
      "}\n",
      "I0914 09:12:11.351116 1956768 layer_factory.hpp:77] Creating layer data0\n",
      "I0914 09:12:11.351125 1956768 net.cpp:86] Creating Layer data0\n",
      "I0914 09:12:11.351128 1956768 net.cpp:382] data0 -> data0\n",
      "I0914 09:12:11.351137 1956768 net.cpp:124] Setting up data0\n",
      "I0914 09:12:11.351141 1956768 net.cpp:131] Top shape: 1 3 10 10 (300)\n",
      "I0914 09:12:11.351146 1956768 net.cpp:139] Memory required for data: 1200\n",
      "I0914 09:12:11.351150 1956768 layer_factory.hpp:77] Creating layer output\n",
      "I0914 09:12:11.351156 1956768 net.cpp:86] Creating Layer output\n",
      "I0914 09:12:11.351161 1956768 net.cpp:408] output <- data0\n",
      "I0914 09:12:11.351166 1956768 net.cpp:382] output -> output\n",
      "I0914 09:12:11.351183 1956768 net.cpp:124] Setting up output\n",
      "I0914 09:12:11.351187 1956768 net.cpp:131] Top shape: 1 20 5 6 (600)\n",
      "I0914 09:12:11.351191 1956768 net.cpp:139] Memory required for data: 3600\n",
      "I0914 09:12:11.351197 1956768 net.cpp:202] output does not need backward computation.\n",
      "I0914 09:12:11.351200 1956768 net.cpp:202] data0 does not need backward computation.\n",
      "I0914 09:12:11.351203 1956768 net.cpp:244] This network produces output output\n",
      "I0914 09:12:11.351207 1956768 net.cpp:257] Network initialization done.\n",
      "I0914 09:12:11.351217 1956768 solver.cpp:57] Solver scaffolding done.\n",
      "W0914 09:12:11.351402 1956768 _caffe.cpp:139] DEPRECATION WARNING - deprecated use of Python interface\n",
      "W0914 09:12:11.351411 1956768 _caffe.cpp:140] Use this instead (with the named \"weights\" parameter):\n",
      "W0914 09:12:11.351414 1956768 _caffe.cpp:142] Net('./.temp/conv2d.prototxt', 1, weights='./.temp/conv2d.caffemodel')\n",
      "I0914 09:12:11.351477 1956768 net.cpp:53] Initializing net from parameters: \n",
      "state {\n",
      "  phase: TEST\n",
      "  level: 0\n",
      "}\n",
      "layer {\n",
      "  name: \"data0\"\n",
      "  type: \"Input\"\n",
      "  top: \"data0\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 1\n",
      "      dim: 3\n",
      "      dim: 10\n",
      "      dim: 10\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"output\"\n",
      "  type: \"Convolution\"\n",
      "  bottom: \"data0\"\n",
      "  top: \"output\"\n",
      "  convolution_param {\n",
      "    num_output: 20\n",
      "    bias_term: false\n",
      "    pad: 1\n",
      "    pad: 2\n",
      "    kernel_size: 3\n",
      "    stride: 2\n",
      "    weight_filler {\n",
      "      type: \"xavier\"\n",
      "    }\n",
      "    bias_filler {\n",
      "      type: \"xavier\"\n",
      "    }\n",
      "    dilation: 1\n",
      "  }\n",
      "}\n",
      "I0914 09:12:11.351508 1956768 layer_factory.hpp:77] Creating layer data0\n",
      "I0914 09:12:11.351516 1956768 net.cpp:86] Creating Layer data0\n",
      "I0914 09:12:11.351519 1956768 net.cpp:382] data0 -> data0\n",
      "I0914 09:12:11.351527 1956768 net.cpp:124] Setting up data0\n",
      "I0914 09:12:11.351531 1956768 net.cpp:131] Top shape: 1 3 10 10 (300)\n",
      "I0914 09:12:11.351536 1956768 net.cpp:139] Memory required for data: 1200\n",
      "I0914 09:12:11.351538 1956768 layer_factory.hpp:77] Creating layer output\n",
      "I0914 09:12:11.351545 1956768 net.cpp:86] Creating Layer output\n",
      "I0914 09:12:11.351549 1956768 net.cpp:408] output <- data0\n",
      "I0914 09:12:11.351553 1956768 net.cpp:382] output -> output\n",
      "I0914 09:12:11.351572 1956768 net.cpp:124] Setting up output\n",
      "I0914 09:12:11.351575 1956768 net.cpp:131] Top shape: 1 20 5 6 (600)\n",
      "I0914 09:12:11.351579 1956768 net.cpp:139] Memory required for data: 3600\n",
      "I0914 09:12:11.351584 1956768 net.cpp:202] output does not need backward computation.\n",
      "I0914 09:12:11.351588 1956768 net.cpp:202] data0 does not need backward computation.\n",
      "I0914 09:12:11.351590 1956768 net.cpp:244] This network produces output output\n",
      "I0914 09:12:11.351595 1956768 net.cpp:257] Network initialization done.\n"
     ]
    }
   ],
   "source": [
    "mod, params = _test_conv2d(\n",
    "    shape_list,\n",
    "    num_output=20,\n",
    "    bias_term=False,\n",
    "    pad=[1, 2],\n",
    "    kernel_size=3,\n",
    "    stride=2,\n",
    "    dilation=1,\n",
    "    weight_filler=dict(type=\"xavier\"),\n",
    "    bias_filler=dict(type=\"xavier\"),\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div class=\"highlight\" style=\"background: \"><pre style=\"line-height: 125%;\"><span></span><span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #AA22FF\">@main</span>(<span style=\"color: #AA22FF; font-weight: bold\">%</span>data0, <span style=\"color: #AA22FF; font-weight: bold\">%</span>v_param_1: Tensor[(<span style=\"color: #008000\">20</span>, <span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">3</span>), float32]) {\n",
       "  nn<span style=\"color: #AA22FF; font-weight: bold\">.</span>conv2d(<span style=\"color: #AA22FF; font-weight: bold\">%</span>data0, <span style=\"color: #AA22FF; font-weight: bold\">%</span>v_param_1, strides<span style=\"color: #AA22FF; font-weight: bold\">=</span>[<span style=\"color: #008000\">2</span>, <span style=\"color: #008000\">2</span>], padding<span style=\"color: #AA22FF; font-weight: bold\">=</span>[<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">2</span>, <span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">2</span>], channels<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #008000\">20</span>, kernel_size<span style=\"color: #AA22FF; font-weight: bold\">=</span>[<span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">3</span>])\n",
       "}\n",
       "</pre></div>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "mod.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "I0914 09:12:11.373440 1956768 solver.cpp:45] Initializing solver from parameters: \n",
      "train_net: \"./.temp/conv2d.prototxt\"\n",
      "base_lr: 0.01\n",
      "display: 1\n",
      "max_iter: 100000\n",
      "lr_policy: \"inv\"\n",
      "gamma: 0.0001\n",
      "power: 0.75\n",
      "momentum: 0.9\n",
      "weight_decay: 0.0005\n",
      "snapshot: 100000\n",
      "snapshot_prefix: \"./.temp/conv2d\"\n",
      "I0914 09:12:11.373510 1956768 solver.cpp:92] Creating training net from train_net file: ./.temp/conv2d.prototxt\n",
      "I0914 09:12:11.373567 1956768 net.cpp:53] Initializing net from parameters: \n",
      "state {\n",
      "  phase: TRAIN\n",
      "}\n",
      "layer {\n",
      "  name: \"data0\"\n",
      "  type: \"Input\"\n",
      "  top: \"data0\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 1\n",
      "      dim: 3\n",
      "      dim: 10\n",
      "      dim: 10\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"output\"\n",
      "  type: \"Convolution\"\n",
      "  bottom: \"data0\"\n",
      "  top: \"output\"\n",
      "  convolution_param {\n",
      "    num_output: 20\n",
      "    bias_term: true\n",
      "    pad: 1\n",
      "    pad: 2\n",
      "    kernel_size: 3\n",
      "    kernel_size: 5\n",
      "    stride: 2\n",
      "    stride: 1\n",
      "    weight_filler {\n",
      "      type: \"xavier\"\n",
      "    }\n",
      "    bias_filler {\n",
      "      type: \"xavier\"\n",
      "    }\n",
      "    dilation: 1\n",
      "    dilation: 2\n",
      "  }\n",
      "}\n",
      "I0914 09:12:11.373600 1956768 layer_factory.hpp:77] Creating layer data0\n",
      "I0914 09:12:11.373607 1956768 net.cpp:86] Creating Layer data0\n",
      "I0914 09:12:11.373612 1956768 net.cpp:382] data0 -> data0\n",
      "I0914 09:12:11.373620 1956768 net.cpp:124] Setting up data0\n",
      "I0914 09:12:11.373625 1956768 net.cpp:131] Top shape: 1 3 10 10 (300)\n",
      "I0914 09:12:11.373629 1956768 net.cpp:139] Memory required for data: 1200\n",
      "I0914 09:12:11.373633 1956768 layer_factory.hpp:77] Creating layer output\n",
      "I0914 09:12:11.373639 1956768 net.cpp:86] Creating Layer output\n",
      "I0914 09:12:11.373643 1956768 net.cpp:408] output <- data0\n",
      "I0914 09:12:11.373648 1956768 net.cpp:382] output -> output\n",
      "I0914 09:12:11.373670 1956768 net.cpp:124] Setting up output\n",
      "I0914 09:12:11.373674 1956768 net.cpp:131] Top shape: 1 20 5 6 (600)\n",
      "I0914 09:12:11.373678 1956768 net.cpp:139] Memory required for data: 3600\n",
      "I0914 09:12:11.373685 1956768 net.cpp:202] output does not need backward computation.\n",
      "I0914 09:12:11.373688 1956768 net.cpp:202] data0 does not need backward computation.\n",
      "I0914 09:12:11.373692 1956768 net.cpp:244] This network produces output output\n",
      "I0914 09:12:11.373696 1956768 net.cpp:257] Network initialization done.\n",
      "I0914 09:12:11.373704 1956768 solver.cpp:57] Solver scaffolding done.\n",
      "W0914 09:12:11.373950 1956768 _caffe.cpp:139] DEPRECATION WARNING - deprecated use of Python interface\n",
      "W0914 09:12:11.373958 1956768 _caffe.cpp:140] Use this instead (with the named \"weights\" parameter):\n",
      "W0914 09:12:11.373962 1956768 _caffe.cpp:142] Net('./.temp/conv2d.prototxt', 1, weights='./.temp/conv2d.caffemodel')\n",
      "I0914 09:12:11.374025 1956768 net.cpp:53] Initializing net from parameters: \n",
      "state {\n",
      "  phase: TEST\n",
      "  level: 0\n",
      "}\n",
      "layer {\n",
      "  name: \"data0\"\n",
      "  type: \"Input\"\n",
      "  top: \"data0\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 1\n",
      "      dim: 3\n",
      "      dim: 10\n",
      "      dim: 10\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"output\"\n",
      "  type: \"Convolution\"\n",
      "  bottom: \"data0\"\n",
      "  top: \"output\"\n",
      "  convolution_param {\n",
      "    num_output: 20\n",
      "    bias_term: true\n",
      "    pad: 1\n",
      "    pad: 2\n",
      "    kernel_size: 3\n",
      "    kernel_size: 5\n",
      "    stride: 2\n",
      "    stride: 1\n",
      "    weight_filler {\n",
      "      type: \"xavier\"\n",
      "    }\n",
      "    bias_filler {\n",
      "      type: \"xavier\"\n",
      "    }\n",
      "    dilation: 1\n",
      "    dilation: 2\n",
      "  }\n",
      "}\n",
      "I0914 09:12:11.374056 1956768 layer_factory.hpp:77] Creating layer data0\n",
      "I0914 09:12:11.374063 1956768 net.cpp:86] Creating Layer data0\n",
      "I0914 09:12:11.374068 1956768 net.cpp:382] data0 -> data0\n",
      "I0914 09:12:11.374075 1956768 net.cpp:124] Setting up data0\n",
      "I0914 09:12:11.374079 1956768 net.cpp:131] Top shape: 1 3 10 10 (300)\n",
      "I0914 09:12:11.374084 1956768 net.cpp:139] Memory required for data: 1200\n",
      "I0914 09:12:11.374089 1956768 layer_factory.hpp:77] Creating layer output\n",
      "I0914 09:12:11.374094 1956768 net.cpp:86] Creating Layer output\n",
      "I0914 09:12:11.374099 1956768 net.cpp:408] output <- data0\n",
      "I0914 09:12:11.374104 1956768 net.cpp:382] output -> output\n",
      "I0914 09:12:11.374125 1956768 net.cpp:124] Setting up output\n",
      "I0914 09:12:11.374128 1956768 net.cpp:131] Top shape: 1 20 5 6 (600)\n",
      "I0914 09:12:11.374132 1956768 net.cpp:139] Memory required for data: 3600\n",
      "I0914 09:12:11.374138 1956768 net.cpp:202] output does not need backward computation.\n",
      "I0914 09:12:11.374142 1956768 net.cpp:202] data0 does not need backward computation.\n",
      "I0914 09:12:11.374145 1956768 net.cpp:244] This network produces output output\n",
      "I0914 09:12:11.374150 1956768 net.cpp:257] Network initialization done.\n"
     ]
    }
   ],
   "source": [
    "mod, params = _test_conv2d(\n",
    "    shape_list,\n",
    "    num_output=20,\n",
    "    bias_term=True,\n",
    "    pad=[1, 2],\n",
    "    kernel_size=[3, 5],\n",
    "    stride=[2, 1],\n",
    "    dilation=[1, 2],\n",
    "    weight_filler=dict(type=\"xavier\"),\n",
    "    bias_filler=dict(type=\"xavier\"),\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div class=\"highlight\" style=\"background: \"><pre style=\"line-height: 125%;\"><span></span><span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #AA22FF\">@main</span>(<span style=\"color: #AA22FF; font-weight: bold\">%</span>data0, <span style=\"color: #AA22FF; font-weight: bold\">%</span>v_param_1: Tensor[(<span style=\"color: #008000\">20</span>, <span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">5</span>), float32], <span style=\"color: #AA22FF; font-weight: bold\">%</span>v_param_2: Tensor[(<span style=\"color: #008000\">20</span>), float32]) {\n",
       "  <span style=\"color: #AA22FF; font-weight: bold\">%</span><span style=\"color: #008000\">0</span> <span style=\"color: #AA22FF; font-weight: bold\">=</span> nn<span style=\"color: #AA22FF; font-weight: bold\">.</span>conv2d(<span style=\"color: #AA22FF; font-weight: bold\">%</span>data0, <span style=\"color: #AA22FF; font-weight: bold\">%</span>v_param_1, strides<span style=\"color: #AA22FF; font-weight: bold\">=</span>[<span style=\"color: #008000\">2</span>, <span style=\"color: #008000\">1</span>], padding<span style=\"color: #AA22FF; font-weight: bold\">=</span>[<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">2</span>, <span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">2</span>], dilation<span style=\"color: #AA22FF; font-weight: bold\">=</span>[<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">2</span>], channels<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #008000\">20</span>, kernel_size<span style=\"color: #AA22FF; font-weight: bold\">=</span>[<span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">5</span>]);\n",
       "  nn<span style=\"color: #AA22FF; font-weight: bold\">.</span>bias_add(<span style=\"color: #AA22FF; font-weight: bold\">%</span><span style=\"color: #008000\">0</span>, <span style=\"color: #AA22FF; font-weight: bold\">%</span>v_param_2)\n",
       "}\n",
       "</pre></div>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "mod.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "I0914 09:12:11.396004 1956768 solver.cpp:45] Initializing solver from parameters: \n",
      "train_net: \"./.temp/conv2d.prototxt\"\n",
      "base_lr: 0.01\n",
      "display: 1\n",
      "max_iter: 100000\n",
      "lr_policy: \"inv\"\n",
      "gamma: 0.0001\n",
      "power: 0.75\n",
      "momentum: 0.9\n",
      "weight_decay: 0.0005\n",
      "snapshot: 100000\n",
      "snapshot_prefix: \"./.temp/conv2d\"\n",
      "I0914 09:12:11.396075 1956768 solver.cpp:92] Creating training net from train_net file: ./.temp/conv2d.prototxt\n",
      "I0914 09:12:11.396133 1956768 net.cpp:53] Initializing net from parameters: \n",
      "state {\n",
      "  phase: TRAIN\n",
      "}\n",
      "layer {\n",
      "  name: \"data0\"\n",
      "  type: \"Input\"\n",
      "  top: \"data0\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 1\n",
      "      dim: 3\n",
      "      dim: 10\n",
      "      dim: 10\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"output\"\n",
      "  type: \"Convolution\"\n",
      "  bottom: \"data0\"\n",
      "  top: \"output\"\n",
      "  convolution_param {\n",
      "    num_output: 20\n",
      "    bias_term: true\n",
      "    weight_filler {\n",
      "      type: \"xavier\"\n",
      "    }\n",
      "    bias_filler {\n",
      "      type: \"xavier\"\n",
      "    }\n",
      "    pad_h: 1\n",
      "    pad_w: 2\n",
      "    kernel_h: 3\n",
      "    kernel_w: 5\n",
      "    stride_h: 2\n",
      "    stride_w: 1\n",
      "    dilation: 1\n",
      "    dilation: 2\n",
      "  }\n",
      "}\n",
      "I0914 09:12:11.396165 1956768 layer_factory.hpp:77] Creating layer data0\n",
      "I0914 09:12:11.396173 1956768 net.cpp:86] Creating Layer data0\n",
      "I0914 09:12:11.396178 1956768 net.cpp:382] data0 -> data0\n",
      "I0914 09:12:11.396186 1956768 net.cpp:124] Setting up data0\n",
      "I0914 09:12:11.396190 1956768 net.cpp:131] Top shape: 1 3 10 10 (300)\n",
      "I0914 09:12:11.396195 1956768 net.cpp:139] Memory required for data: 1200\n",
      "I0914 09:12:11.396198 1956768 layer_factory.hpp:77] Creating layer output\n",
      "I0914 09:12:11.396205 1956768 net.cpp:86] Creating Layer output\n",
      "I0914 09:12:11.396209 1956768 net.cpp:408] output <- data0\n",
      "I0914 09:12:11.396214 1956768 net.cpp:382] output -> output\n",
      "I0914 09:12:11.396234 1956768 net.cpp:124] Setting up output\n",
      "I0914 09:12:11.396239 1956768 net.cpp:131] Top shape: 1 20 5 6 (600)\n",
      "I0914 09:12:11.396243 1956768 net.cpp:139] Memory required for data: 3600\n",
      "I0914 09:12:11.396250 1956768 net.cpp:202] output does not need backward computation.\n",
      "I0914 09:12:11.396252 1956768 net.cpp:202] data0 does not need backward computation.\n",
      "I0914 09:12:11.396255 1956768 net.cpp:244] This network produces output output\n",
      "I0914 09:12:11.396260 1956768 net.cpp:257] Network initialization done.\n",
      "I0914 09:12:11.396268 1956768 solver.cpp:57] Solver scaffolding done.\n",
      "W0914 09:12:11.396432 1956768 _caffe.cpp:139] DEPRECATION WARNING - deprecated use of Python interface\n",
      "W0914 09:12:11.396442 1956768 _caffe.cpp:140] Use this instead (with the named \"weights\" parameter):\n",
      "W0914 09:12:11.396445 1956768 _caffe.cpp:142] Net('./.temp/conv2d.prototxt', 1, weights='./.temp/conv2d.caffemodel')\n",
      "I0914 09:12:11.396507 1956768 net.cpp:53] Initializing net from parameters: \n",
      "state {\n",
      "  phase: TEST\n",
      "  level: 0\n",
      "}\n",
      "layer {\n",
      "  name: \"data0\"\n",
      "  type: \"Input\"\n",
      "  top: \"data0\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 1\n",
      "      dim: 3\n",
      "      dim: 10\n",
      "      dim: 10\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"output\"\n",
      "  type: \"Convolution\"\n",
      "  bottom: \"data0\"\n",
      "  top: \"output\"\n",
      "  convolution_param {\n",
      "    num_output: 20\n",
      "    bias_term: true\n",
      "    weight_filler {\n",
      "      type: \"xavier\"\n",
      "    }\n",
      "    bias_filler {\n",
      "      type: \"xavier\"\n",
      "    }\n",
      "    pad_h: 1\n",
      "    pad_w: 2\n",
      "    kernel_h: 3\n",
      "    kernel_w: 5\n",
      "    stride_h: 2\n",
      "    stride_w: 1\n",
      "    dilation: 1\n",
      "    dilation: 2\n",
      "  }\n",
      "}\n",
      "I0914 09:12:11.396538 1956768 layer_factory.hpp:77] Creating layer data0\n",
      "I0914 09:12:11.396545 1956768 net.cpp:86] Creating Layer data0\n",
      "I0914 09:12:11.396549 1956768 net.cpp:382] data0 -> data0\n",
      "I0914 09:12:11.396557 1956768 net.cpp:124] Setting up data0\n",
      "I0914 09:12:11.396561 1956768 net.cpp:131] Top shape: 1 3 10 10 (300)\n",
      "I0914 09:12:11.396565 1956768 net.cpp:139] Memory required for data: 1200\n",
      "I0914 09:12:11.396569 1956768 layer_factory.hpp:77] Creating layer output\n",
      "I0914 09:12:11.396575 1956768 net.cpp:86] Creating Layer output\n",
      "I0914 09:12:11.396579 1956768 net.cpp:408] output <- data0\n",
      "I0914 09:12:11.396584 1956768 net.cpp:382] output -> output\n",
      "I0914 09:12:11.396605 1956768 net.cpp:124] Setting up output\n",
      "I0914 09:12:11.396608 1956768 net.cpp:131] Top shape: 1 20 5 6 (600)\n",
      "I0914 09:12:11.396612 1956768 net.cpp:139] Memory required for data: 3600\n",
      "I0914 09:12:11.396618 1956768 net.cpp:202] output does not need backward computation.\n",
      "I0914 09:12:11.396621 1956768 net.cpp:202] data0 does not need backward computation.\n",
      "I0914 09:12:11.396624 1956768 net.cpp:244] This network produces output output\n",
      "I0914 09:12:11.396628 1956768 net.cpp:257] Network initialization done.\n"
     ]
    }
   ],
   "source": [
    "mod, params = _test_conv2d(\n",
    "    shape_list,\n",
    "    num_output=20,\n",
    "    bias_term=True,\n",
    "    pad_h=1,\n",
    "    pad_w=2,\n",
    "    kernel_h=3,\n",
    "    kernel_w=5,\n",
    "    stride_h=2,\n",
    "    stride_w=1,\n",
    "    dilation=[1, 2],\n",
    "    weight_filler=dict(type=\"xavier\"),\n",
    "    bias_filler=dict(type=\"xavier\"),\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div class=\"highlight\" style=\"background: \"><pre style=\"line-height: 125%;\"><span></span><span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #AA22FF\">@main</span>(<span style=\"color: #AA22FF; font-weight: bold\">%</span>data0, <span style=\"color: #AA22FF; font-weight: bold\">%</span>v_param_1: Tensor[(<span style=\"color: #008000\">20</span>, <span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">5</span>), float32], <span style=\"color: #AA22FF; font-weight: bold\">%</span>v_param_2: Tensor[(<span style=\"color: #008000\">20</span>), float32]) {\n",
       "  <span style=\"color: #AA22FF; font-weight: bold\">%</span><span style=\"color: #008000\">0</span> <span style=\"color: #AA22FF; font-weight: bold\">=</span> nn<span style=\"color: #AA22FF; font-weight: bold\">.</span>conv2d(<span style=\"color: #AA22FF; font-weight: bold\">%</span>data0, <span style=\"color: #AA22FF; font-weight: bold\">%</span>v_param_1, strides<span style=\"color: #AA22FF; font-weight: bold\">=</span>[<span style=\"color: #008000\">2</span>, <span style=\"color: #008000\">1</span>], padding<span style=\"color: #AA22FF; font-weight: bold\">=</span>[<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">2</span>, <span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">2</span>], dilation<span style=\"color: #AA22FF; font-weight: bold\">=</span>[<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">2</span>], channels<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #008000\">20</span>, kernel_size<span style=\"color: #AA22FF; font-weight: bold\">=</span>[<span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">5</span>]);\n",
       "  nn<span style=\"color: #AA22FF; font-weight: bold\">.</span>bias_add(<span style=\"color: #AA22FF; font-weight: bold\">%</span><span style=\"color: #008000\">0</span>, <span style=\"color: #AA22FF; font-weight: bold\">%</span>v_param_2)\n",
       "}\n",
       "</pre></div>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "mod.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "I0914 09:12:11.421512 1956768 solver.cpp:45] Initializing solver from parameters: \n",
      "train_net: \"./.temp/conv2d.prototxt\"\n",
      "base_lr: 0.01\n",
      "display: 1\n",
      "max_iter: 100000\n",
      "lr_policy: \"inv\"\n",
      "gamma: 0.0001\n",
      "power: 0.75\n",
      "momentum: 0.9\n",
      "weight_decay: 0.0005\n",
      "snapshot: 100000\n",
      "snapshot_prefix: \"./.temp/conv2d\"\n",
      "I0914 09:12:11.421586 1956768 solver.cpp:92] Creating training net from train_net file: ./.temp/conv2d.prototxt\n",
      "I0914 09:12:11.421649 1956768 net.cpp:53] Initializing net from parameters: \n",
      "state {\n",
      "  phase: TRAIN\n",
      "}\n",
      "layer {\n",
      "  name: \"data0\"\n",
      "  type: \"Input\"\n",
      "  top: \"data0\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 1\n",
      "      dim: 2\n",
      "      dim: 10\n",
      "      dim: 10\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"output\"\n",
      "  type: \"Convolution\"\n",
      "  bottom: \"data0\"\n",
      "  top: \"output\"\n",
      "  convolution_param {\n",
      "    num_output: 20\n",
      "    bias_term: true\n",
      "    pad: 1\n",
      "    pad: 2\n",
      "    kernel_size: 3\n",
      "    kernel_size: 5\n",
      "    group: 2\n",
      "    stride: 2\n",
      "    stride: 1\n",
      "    weight_filler {\n",
      "      type: \"xavier\"\n",
      "    }\n",
      "    bias_filler {\n",
      "      type: \"xavier\"\n",
      "    }\n",
      "    dilation: 1\n",
      "    dilation: 2\n",
      "  }\n",
      "}\n",
      "I0914 09:12:11.421682 1956768 layer_factory.hpp:77] Creating layer data0\n",
      "I0914 09:12:11.421690 1956768 net.cpp:86] Creating Layer data0\n",
      "I0914 09:12:11.421695 1956768 net.cpp:382] data0 -> data0\n",
      "I0914 09:12:11.421705 1956768 net.cpp:124] Setting up data0\n",
      "I0914 09:12:11.421708 1956768 net.cpp:131] Top shape: 1 2 10 10 (200)\n",
      "I0914 09:12:11.421712 1956768 net.cpp:139] Memory required for data: 800\n",
      "I0914 09:12:11.421716 1956768 layer_factory.hpp:77] Creating layer output\n",
      "I0914 09:12:11.421723 1956768 net.cpp:86] Creating Layer output\n",
      "I0914 09:12:11.421726 1956768 net.cpp:408] output <- data0\n",
      "I0914 09:12:11.421731 1956768 net.cpp:382] output -> output\n",
      "I0914 09:12:11.421751 1956768 net.cpp:124] Setting up output\n",
      "I0914 09:12:11.421754 1956768 net.cpp:131] Top shape: 1 20 5 6 (600)\n",
      "I0914 09:12:11.421758 1956768 net.cpp:139] Memory required for data: 3200\n",
      "I0914 09:12:11.421765 1956768 net.cpp:202] output does not need backward computation.\n",
      "I0914 09:12:11.421769 1956768 net.cpp:202] data0 does not need backward computation.\n",
      "I0914 09:12:11.421772 1956768 net.cpp:244] This network produces output output\n",
      "I0914 09:12:11.421777 1956768 net.cpp:257] Network initialization done.\n",
      "I0914 09:12:11.421784 1956768 solver.cpp:57] Solver scaffolding done.\n",
      "W0914 09:12:11.421972 1956768 _caffe.cpp:139] DEPRECATION WARNING - deprecated use of Python interface\n",
      "W0914 09:12:11.421981 1956768 _caffe.cpp:140] Use this instead (with the named \"weights\" parameter):\n",
      "W0914 09:12:11.421985 1956768 _caffe.cpp:142] Net('./.temp/conv2d.prototxt', 1, weights='./.temp/conv2d.caffemodel')\n",
      "I0914 09:12:11.422055 1956768 net.cpp:53] Initializing net from parameters: \n",
      "state {\n",
      "  phase: TEST\n",
      "  level: 0\n",
      "}\n",
      "layer {\n",
      "  name: \"data0\"\n",
      "  type: \"Input\"\n",
      "  top: \"data0\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 1\n",
      "      dim: 2\n",
      "      dim: 10\n",
      "      dim: 10\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"output\"\n",
      "  type: \"Convolution\"\n",
      "  bottom: \"data0\"\n",
      "  top: \"output\"\n",
      "  convolution_param {\n",
      "    num_output: 20\n",
      "    bias_term: true\n",
      "    pad: 1\n",
      "    pad: 2\n",
      "    kernel_size: 3\n",
      "    kernel_size: 5\n",
      "    group: 2\n",
      "    stride: 2\n",
      "    stride: 1\n",
      "    weight_filler {\n",
      "      type: \"xavier\"\n",
      "    }\n",
      "    bias_filler {\n",
      "      type: \"xavier\"\n",
      "    }\n",
      "    dilation: 1\n",
      "    dilation: 2\n",
      "  }\n",
      "}\n",
      "I0914 09:12:11.422086 1956768 layer_factory.hpp:77] Creating layer data0\n",
      "I0914 09:12:11.422092 1956768 net.cpp:86] Creating Layer data0\n",
      "I0914 09:12:11.422096 1956768 net.cpp:382] data0 -> data0\n",
      "I0914 09:12:11.422104 1956768 net.cpp:124] Setting up data0\n",
      "I0914 09:12:11.422108 1956768 net.cpp:131] Top shape: 1 2 10 10 (200)\n",
      "I0914 09:12:11.422112 1956768 net.cpp:139] Memory required for data: 800\n",
      "I0914 09:12:11.422116 1956768 layer_factory.hpp:77] Creating layer output\n",
      "I0914 09:12:11.422122 1956768 net.cpp:86] Creating Layer output\n",
      "I0914 09:12:11.422127 1956768 net.cpp:408] output <- data0\n",
      "I0914 09:12:11.422132 1956768 net.cpp:382] output -> output\n",
      "I0914 09:12:11.422149 1956768 net.cpp:124] Setting up output\n",
      "I0914 09:12:11.422153 1956768 net.cpp:131] Top shape: 1 20 5 6 (600)\n",
      "I0914 09:12:11.422158 1956768 net.cpp:139] Memory required for data: 3200\n",
      "I0914 09:12:11.422163 1956768 net.cpp:202] output does not need backward computation.\n",
      "I0914 09:12:11.422168 1956768 net.cpp:202] data0 does not need backward computation.\n",
      "I0914 09:12:11.422170 1956768 net.cpp:244] This network produces output output\n",
      "I0914 09:12:11.422174 1956768 net.cpp:257] Network initialization done.\n"
     ]
    }
   ],
   "source": [
    "mod, params = _test_conv2d(\n",
    "    [(1, 2, 10, 10)],\n",
    "    num_output=20,\n",
    "    bias_term=True,\n",
    "    pad=[1, 2],\n",
    "    kernel_size=[3, 5],\n",
    "    stride=[2, 1],\n",
    "    dilation=[1, 2],\n",
    "    weight_filler=dict(type=\"xavier\"),\n",
    "    bias_filler=dict(type=\"xavier\"),\n",
    "    group=2,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div class=\"highlight\" style=\"background: \"><pre style=\"line-height: 125%;\"><span></span><span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #AA22FF\">@main</span>(<span style=\"color: #AA22FF; font-weight: bold\">%</span>data0, <span style=\"color: #AA22FF; font-weight: bold\">%</span>v_param_1: Tensor[(<span style=\"color: #008000\">20</span>, <span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">5</span>), float32], <span style=\"color: #AA22FF; font-weight: bold\">%</span>v_param_2: Tensor[(<span style=\"color: #008000\">20</span>), float32]) {\n",
       "  <span style=\"color: #AA22FF; font-weight: bold\">%</span><span style=\"color: #008000\">0</span> <span style=\"color: #AA22FF; font-weight: bold\">=</span> nn<span style=\"color: #AA22FF; font-weight: bold\">.</span>conv2d(<span style=\"color: #AA22FF; font-weight: bold\">%</span>data0, <span style=\"color: #AA22FF; font-weight: bold\">%</span>v_param_1, strides<span style=\"color: #AA22FF; font-weight: bold\">=</span>[<span style=\"color: #008000\">2</span>, <span style=\"color: #008000\">1</span>], padding<span style=\"color: #AA22FF; font-weight: bold\">=</span>[<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">2</span>, <span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">2</span>], dilation<span style=\"color: #AA22FF; font-weight: bold\">=</span>[<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">2</span>], groups<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #008000\">2</span>, channels<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #008000\">20</span>, kernel_size<span style=\"color: #AA22FF; font-weight: bold\">=</span>[<span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">5</span>]);\n",
       "  nn<span style=\"color: #AA22FF; font-weight: bold\">.</span>bias_add(<span style=\"color: #AA22FF; font-weight: bold\">%</span><span style=\"color: #008000\">0</span>, <span style=\"color: #AA22FF; font-weight: bold\">%</span>v_param_2)\n",
       "}\n",
       "</pre></div>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "mod.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## caffe crop"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "from caffe_utils import _test_op\n",
    "\n",
    "def _test_crop(data, **kwargs):\n",
    "    \"\"\"One iteration of Crop\"\"\"\n",
    "    _test_op(data, L.Crop, \"Crop\", **kwargs)\n",
    "\n",
    "\n",
    "def test_forward_Crop():\n",
    "    \"\"\"Crop\"\"\"\n",
    "    _test_crop([np.random.rand(10, 10, 120, 120), np.random.rand(10, 5, 50, 60)])\n",
    "    _test_crop([np.random.rand(10, 10, 120, 120), np.random.rand(10, 5, 50, 60)], axis=1)\n",
    "    _test_crop([np.random.rand(10, 10, 120, 120), np.random.rand(10, 5, 50, 60)], axis=1, offset=2)\n",
    "    _test_crop(\n",
    "        [np.random.rand(10, 10, 120, 120), np.random.rand(10, 5, 50, 60)], axis=1, offset=[1, 2, 4]\n",
    "    )\n",
    "    _test_crop(\n",
    "        [np.random.rand(10, 10, 120, 120), np.random.rand(10, 5, 50, 60)], axis=2, offset=[2, 4]\n",
    "    )\n",
    "    _test_crop([np.random.rand(10, 120, 120), np.random.rand(5, 50, 60)], axis=1, offset=[2, 4])\n",
    "    _test_crop([np.random.rand(120, 120), np.random.rand(50, 60)], axis=0, offset=[2, 4])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "I0914 09:12:30.343927 1956768 solver.cpp:45] Initializing solver from parameters: \n",
      "train_net: \"/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60.prototxt\"\n",
      "base_lr: 0.01\n",
      "display: 1\n",
      "max_iter: 100000\n",
      "lr_policy: \"inv\"\n",
      "gamma: 0.0001\n",
      "power: 0.75\n",
      "momentum: 0.9\n",
      "weight_decay: 0.0005\n",
      "snapshot: 100000\n",
      "snapshot_prefix: \"/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60\"\n",
      "I0914 09:12:30.344040 1956768 solver.cpp:92] Creating training net from train_net file: /home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60.prototxt\n",
      "I0914 09:12:30.344126 1956768 net.cpp:53] Initializing net from parameters: \n",
      "state {\n",
      "  phase: TRAIN\n",
      "}\n",
      "layer {\n",
      "  name: \"data0\"\n",
      "  type: \"Input\"\n",
      "  top: \"data0\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 10\n",
      "      dim: 10\n",
      "      dim: 120\n",
      "      dim: 120\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"data1\"\n",
      "  type: \"Input\"\n",
      "  top: \"data1\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 10\n",
      "      dim: 5\n",
      "      dim: 50\n",
      "      dim: 60\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"output\"\n",
      "  type: \"Crop\"\n",
      "  bottom: \"data0\"\n",
      "  bottom: \"data1\"\n",
      "  top: \"output\"\n",
      "}\n",
      "I0914 09:12:30.344178 1956768 layer_factory.hpp:77] Creating layer data0\n",
      "I0914 09:12:30.344192 1956768 net.cpp:86] Creating Layer data0\n",
      "I0914 09:12:30.344200 1956768 net.cpp:382] data0 -> data0\n",
      "I0914 09:12:30.344216 1956768 net.cpp:124] Setting up data0\n",
      "I0914 09:12:30.344221 1956768 net.cpp:131] Top shape: 10 10 120 120 (1440000)\n",
      "I0914 09:12:30.344230 1956768 net.cpp:139] Memory required for data: 5760000\n",
      "I0914 09:12:30.344236 1956768 layer_factory.hpp:77] Creating layer data1\n",
      "I0914 09:12:30.344244 1956768 net.cpp:86] Creating Layer data1\n",
      "I0914 09:12:30.344250 1956768 net.cpp:382] data1 -> data1\n",
      "I0914 09:12:30.344260 1956768 net.cpp:124] Setting up data1\n",
      "I0914 09:12:30.344265 1956768 net.cpp:131] Top shape: 10 5 50 60 (150000)\n",
      "I0914 09:12:30.344271 1956768 net.cpp:139] Memory required for data: 6360000\n",
      "I0914 09:12:30.344276 1956768 layer_factory.hpp:77] Creating layer output\n",
      "I0914 09:12:30.344298 1956768 net.cpp:86] Creating Layer output\n",
      "I0914 09:12:30.344303 1956768 net.cpp:408] output <- data0\n",
      "I0914 09:12:30.344309 1956768 net.cpp:408] output <- data1\n",
      "I0914 09:12:30.344316 1956768 net.cpp:382] output -> output\n",
      "I0914 09:12:30.344332 1956768 net.cpp:124] Setting up output\n",
      "I0914 09:12:30.344336 1956768 net.cpp:131] Top shape: 10 10 50 60 (300000)\n",
      "I0914 09:12:30.344343 1956768 net.cpp:139] Memory required for data: 7560000\n",
      "I0914 09:12:30.344348 1956768 net.cpp:202] output does not need backward computation.\n",
      "I0914 09:12:30.344352 1956768 net.cpp:202] data1 does not need backward computation.\n",
      "I0914 09:12:30.344357 1956768 net.cpp:202] data0 does not need backward computation.\n",
      "I0914 09:12:30.344362 1956768 net.cpp:244] This network produces output output\n",
      "I0914 09:12:30.344368 1956768 net.cpp:257] Network initialization done.\n",
      "I0914 09:12:30.344383 1956768 solver.cpp:57] Solver scaffolding done.\n",
      "W0914 09:12:30.344502 1956768 _caffe.cpp:139] DEPRECATION WARNING - deprecated use of Python interface\n",
      "W0914 09:12:30.344512 1956768 _caffe.cpp:140] Use this instead (with the named \"weights\" parameter):\n",
      "W0914 09:12:30.344517 1956768 _caffe.cpp:142] Net('/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60.prototxt', 1, weights='/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60.caffemodel')\n",
      "I0914 09:12:30.344587 1956768 net.cpp:53] Initializing net from parameters: \n",
      "state {\n",
      "  phase: TEST\n",
      "  level: 0\n",
      "}\n",
      "layer {\n",
      "  name: \"data0\"\n",
      "  type: \"Input\"\n",
      "  top: \"data0\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 10\n",
      "      dim: 10\n",
      "      dim: 120\n",
      "      dim: 120\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"data1\"\n",
      "  type: \"Input\"\n",
      "  top: \"data1\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 10\n",
      "      dim: 5\n",
      "      dim: 50\n",
      "      dim: 60\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"output\"\n",
      "  type: \"Crop\"\n",
      "  bottom: \"data0\"\n",
      "  bottom: \"data1\"\n",
      "  top: \"output\"\n",
      "}\n",
      "I0914 09:12:30.344625 1956768 layer_factory.hpp:77] Creating layer data0\n",
      "I0914 09:12:30.344635 1956768 net.cpp:86] Creating Layer data0\n",
      "I0914 09:12:30.344640 1956768 net.cpp:382] data0 -> data0\n",
      "I0914 09:12:30.344650 1956768 net.cpp:124] Setting up data0\n",
      "I0914 09:12:30.344655 1956768 net.cpp:131] Top shape: 10 10 120 120 (1440000)\n",
      "I0914 09:12:30.344663 1956768 net.cpp:139] Memory required for data: 5760000\n",
      "I0914 09:12:30.344668 1956768 layer_factory.hpp:77] Creating layer data1\n",
      "I0914 09:12:30.344676 1956768 net.cpp:86] Creating Layer data1\n",
      "I0914 09:12:30.344681 1956768 net.cpp:382] data1 -> data1\n",
      "I0914 09:12:30.344688 1956768 net.cpp:124] Setting up data1\n",
      "I0914 09:12:30.344692 1956768 net.cpp:131] Top shape: 10 5 50 60 (150000)\n",
      "I0914 09:12:30.344700 1956768 net.cpp:139] Memory required for data: 6360000\n",
      "I0914 09:12:30.344705 1956768 layer_factory.hpp:77] Creating layer output\n",
      "I0914 09:12:30.344712 1956768 net.cpp:86] Creating Layer output\n",
      "I0914 09:12:30.344717 1956768 net.cpp:408] output <- data0\n",
      "I0914 09:12:30.344722 1956768 net.cpp:408] output <- data1\n",
      "I0914 09:12:30.344727 1956768 net.cpp:382] output -> output\n",
      "I0914 09:12:30.344738 1956768 net.cpp:124] Setting up output\n",
      "I0914 09:12:30.344743 1956768 net.cpp:131] Top shape: 10 10 50 60 (300000)\n",
      "I0914 09:12:30.344748 1956768 net.cpp:139] Memory required for data: 7560000\n",
      "I0914 09:12:30.344753 1956768 net.cpp:202] output does not need backward computation.\n",
      "I0914 09:12:30.344758 1956768 net.cpp:202] data1 does not need backward computation.\n",
      "I0914 09:12:30.344761 1956768 net.cpp:202] data0 does not need backward computation.\n",
      "I0914 09:12:30.344765 1956768 net.cpp:244] This network produces output output\n",
      "I0914 09:12:30.344771 1956768 net.cpp:257] Network initialization done.\n",
      "I0914 09:12:31.032979 1956768 solver.cpp:45] Initializing solver from parameters: \n",
      "train_net: \"/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_1.prototxt\"\n",
      "base_lr: 0.01\n",
      "display: 1\n",
      "max_iter: 100000\n",
      "lr_policy: \"inv\"\n",
      "gamma: 0.0001\n",
      "power: 0.75\n",
      "momentum: 0.9\n",
      "weight_decay: 0.0005\n",
      "snapshot: 100000\n",
      "snapshot_prefix: \"/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_1\"\n",
      "I0914 09:12:31.033103 1956768 solver.cpp:92] Creating training net from train_net file: /home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_1.prototxt\n",
      "I0914 09:12:31.033195 1956768 net.cpp:53] Initializing net from parameters: \n",
      "state {\n",
      "  phase: TRAIN\n",
      "}\n",
      "layer {\n",
      "  name: \"data0\"\n",
      "  type: \"Input\"\n",
      "  top: \"data0\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 10\n",
      "      dim: 10\n",
      "      dim: 120\n",
      "      dim: 120\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"data1\"\n",
      "  type: \"Input\"\n",
      "  top: \"data1\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 10\n",
      "      dim: 5\n",
      "      dim: 50\n",
      "      dim: 60\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"output\"\n",
      "  type: \"Crop\"\n",
      "  bottom: \"data0\"\n",
      "  bottom: \"data1\"\n",
      "  top: \"output\"\n",
      "  crop_param {\n",
      "    axis: 1\n",
      "  }\n",
      "}\n",
      "I0914 09:12:31.033250 1956768 layer_factory.hpp:77] Creating layer data0\n",
      "I0914 09:12:31.033263 1956768 net.cpp:86] Creating Layer data0\n",
      "I0914 09:12:31.033272 1956768 net.cpp:382] data0 -> data0\n",
      "I0914 09:12:31.033289 1956768 net.cpp:124] Setting up data0\n",
      "I0914 09:12:31.033295 1956768 net.cpp:131] Top shape: 10 10 120 120 (1440000)\n",
      "I0914 09:12:31.033304 1956768 net.cpp:139] Memory required for data: 5760000\n",
      "I0914 09:12:31.033313 1956768 layer_factory.hpp:77] Creating layer data1\n",
      "I0914 09:12:31.033320 1956768 net.cpp:86] Creating Layer data1\n",
      "I0914 09:12:31.033326 1956768 net.cpp:382] data1 -> data1\n",
      "I0914 09:12:31.033335 1956768 net.cpp:124] Setting up data1\n",
      "I0914 09:12:31.033339 1956768 net.cpp:131] Top shape: 10 5 50 60 (150000)\n",
      "I0914 09:12:31.033346 1956768 net.cpp:139] Memory required for data: 6360000\n",
      "I0914 09:12:31.033350 1956768 layer_factory.hpp:77] Creating layer output\n",
      "I0914 09:12:31.033358 1956768 net.cpp:86] Creating Layer output\n",
      "I0914 09:12:31.033363 1956768 net.cpp:408] output <- data0\n",
      "I0914 09:12:31.033370 1956768 net.cpp:408] output <- data1\n",
      "I0914 09:12:31.033376 1956768 net.cpp:382] output -> output\n",
      "I0914 09:12:31.033390 1956768 net.cpp:124] Setting up output\n",
      "I0914 09:12:31.033396 1956768 net.cpp:131] Top shape: 10 5 50 60 (150000)\n",
      "I0914 09:12:31.033401 1956768 net.cpp:139] Memory required for data: 6960000\n",
      "I0914 09:12:31.033406 1956768 net.cpp:202] output does not need backward computation.\n",
      "I0914 09:12:31.033411 1956768 net.cpp:202] data1 does not need backward computation.\n",
      "I0914 09:12:31.033416 1956768 net.cpp:202] data0 does not need backward computation.\n",
      "I0914 09:12:31.033421 1956768 net.cpp:244] This network produces output output\n",
      "I0914 09:12:31.033427 1956768 net.cpp:257] Network initialization done.\n",
      "I0914 09:12:31.033443 1956768 solver.cpp:57] Solver scaffolding done.\n",
      "W0914 09:12:31.033558 1956768 _caffe.cpp:139] DEPRECATION WARNING - deprecated use of Python interface\n",
      "W0914 09:12:31.033568 1956768 _caffe.cpp:140] Use this instead (with the named \"weights\" parameter):\n",
      "W0914 09:12:31.033573 1956768 _caffe.cpp:142] Net('/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_1.prototxt', 1, weights='/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_1.caffemodel')\n",
      "I0914 09:12:31.033648 1956768 net.cpp:53] Initializing net from parameters: \n",
      "state {\n",
      "  phase: TEST\n",
      "  level: 0\n",
      "}\n",
      "layer {\n",
      "  name: \"data0\"\n",
      "  type: \"Input\"\n",
      "  top: \"data0\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 10\n",
      "      dim: 10\n",
      "      dim: 120\n",
      "      dim: 120\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"data1\"\n",
      "  type: \"Input\"\n",
      "  top: \"data1\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 10\n",
      "      dim: 5\n",
      "      dim: 50\n",
      "      dim: 60\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"output\"\n",
      "  type: \"Crop\"\n",
      "  bottom: \"data0\"\n",
      "  bottom: \"data1\"\n",
      "  top: \"output\"\n",
      "  crop_param {\n",
      "    axis: 1\n",
      "  }\n",
      "}\n",
      "I0914 09:12:31.033689 1956768 layer_factory.hpp:77] Creating layer data0\n",
      "I0914 09:12:31.033696 1956768 net.cpp:86] Creating Layer data0\n",
      "I0914 09:12:31.033702 1956768 net.cpp:382] data0 -> data0\n",
      "I0914 09:12:31.033713 1956768 net.cpp:124] Setting up data0\n",
      "I0914 09:12:31.033718 1956768 net.cpp:131] Top shape: 10 10 120 120 (1440000)\n",
      "I0914 09:12:31.033725 1956768 net.cpp:139] Memory required for data: 5760000\n",
      "I0914 09:12:31.033731 1956768 layer_factory.hpp:77] Creating layer data1\n",
      "I0914 09:12:31.033737 1956768 net.cpp:86] Creating Layer data1\n",
      "I0914 09:12:31.033744 1956768 net.cpp:382] data1 -> data1\n",
      "I0914 09:12:31.033752 1956768 net.cpp:124] Setting up data1\n",
      "I0914 09:12:31.033758 1956768 net.cpp:131] Top shape: 10 5 50 60 (150000)\n",
      "I0914 09:12:31.033764 1956768 net.cpp:139] Memory required for data: 6360000\n",
      "I0914 09:12:31.033769 1956768 layer_factory.hpp:77] Creating layer output\n",
      "I0914 09:12:31.033775 1956768 net.cpp:86] Creating Layer output\n",
      "I0914 09:12:31.033780 1956768 net.cpp:408] output <- data0\n",
      "I0914 09:12:31.033785 1956768 net.cpp:408] output <- data1\n",
      "I0914 09:12:31.033792 1956768 net.cpp:382] output -> output\n",
      "I0914 09:12:31.033803 1956768 net.cpp:124] Setting up output\n",
      "I0914 09:12:31.033808 1956768 net.cpp:131] Top shape: 10 5 50 60 (150000)\n",
      "I0914 09:12:31.033814 1956768 net.cpp:139] Memory required for data: 6960000\n",
      "I0914 09:12:31.033819 1956768 net.cpp:202] output does not need backward computation.\n",
      "I0914 09:12:31.033824 1956768 net.cpp:202] data1 does not need backward computation.\n",
      "I0914 09:12:31.033828 1956768 net.cpp:202] data0 does not need backward computation.\n",
      "I0914 09:12:31.033833 1956768 net.cpp:244] This network produces output output\n",
      "I0914 09:12:31.033839 1956768 net.cpp:257] Network initialization done.\n",
      "I0914 09:12:31.330210 1956768 solver.cpp:45] Initializing solver from parameters: \n",
      "train_net: \"/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_1_2.prototxt\"\n",
      "base_lr: 0.01\n",
      "display: 1\n",
      "max_iter: 100000\n",
      "lr_policy: \"inv\"\n",
      "gamma: 0.0001\n",
      "power: 0.75\n",
      "momentum: 0.9\n",
      "weight_decay: 0.0005\n",
      "snapshot: 100000\n",
      "snapshot_prefix: \"/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_1_2\"\n",
      "I0914 09:12:31.330330 1956768 solver.cpp:92] Creating training net from train_net file: /home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_1_2.prototxt\n",
      "I0914 09:12:31.330410 1956768 net.cpp:53] Initializing net from parameters: \n",
      "state {\n",
      "  phase: TRAIN\n",
      "}\n",
      "layer {\n",
      "  name: \"data0\"\n",
      "  type: \"Input\"\n",
      "  top: \"data0\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 10\n",
      "      dim: 10\n",
      "      dim: 120\n",
      "      dim: 120\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"data1\"\n",
      "  type: \"Input\"\n",
      "  top: \"data1\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 10\n",
      "      dim: 5\n",
      "      dim: 50\n",
      "      dim: 60\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"output\"\n",
      "  type: \"Crop\"\n",
      "  bottom: \"data0\"\n",
      "  bottom: \"data1\"\n",
      "  top: \"output\"\n",
      "  crop_param {\n",
      "    axis: 1\n",
      "    offset: 2\n",
      "  }\n",
      "}\n",
      "I0914 09:12:31.330456 1956768 layer_factory.hpp:77] Creating layer data0\n",
      "I0914 09:12:31.330467 1956768 net.cpp:86] Creating Layer data0\n",
      "I0914 09:12:31.330474 1956768 net.cpp:382] data0 -> data0\n",
      "I0914 09:12:31.330490 1956768 net.cpp:124] Setting up data0\n",
      "I0914 09:12:31.330494 1956768 net.cpp:131] Top shape: 10 10 120 120 (1440000)\n",
      "I0914 09:12:31.330502 1956768 net.cpp:139] Memory required for data: 5760000\n",
      "I0914 09:12:31.330507 1956768 layer_factory.hpp:77] Creating layer data1\n",
      "I0914 09:12:31.330513 1956768 net.cpp:86] Creating Layer data1\n",
      "I0914 09:12:31.330518 1956768 net.cpp:382] data1 -> data1\n",
      "I0914 09:12:31.330528 1956768 net.cpp:124] Setting up data1\n",
      "I0914 09:12:31.330530 1956768 net.cpp:131] Top shape: 10 5 50 60 (150000)\n",
      "I0914 09:12:31.330536 1956768 net.cpp:139] Memory required for data: 6360000\n",
      "I0914 09:12:31.330540 1956768 layer_factory.hpp:77] Creating layer output\n",
      "I0914 09:12:31.330546 1956768 net.cpp:86] Creating Layer output\n",
      "I0914 09:12:31.330551 1956768 net.cpp:408] output <- data0\n",
      "I0914 09:12:31.330555 1956768 net.cpp:408] output <- data1\n",
      "I0914 09:12:31.330561 1956768 net.cpp:382] output -> output\n",
      "I0914 09:12:31.330574 1956768 net.cpp:124] Setting up output\n",
      "I0914 09:12:31.330577 1956768 net.cpp:131] Top shape: 10 5 50 60 (150000)\n",
      "I0914 09:12:31.330582 1956768 net.cpp:139] Memory required for data: 6960000\n",
      "I0914 09:12:31.330586 1956768 net.cpp:202] output does not need backward computation.\n",
      "I0914 09:12:31.330590 1956768 net.cpp:202] data1 does not need backward computation.\n",
      "I0914 09:12:31.330595 1956768 net.cpp:202] data0 does not need backward computation.\n",
      "I0914 09:12:31.330598 1956768 net.cpp:244] This network produces output output\n",
      "I0914 09:12:31.330605 1956768 net.cpp:257] Network initialization done.\n",
      "I0914 09:12:31.330617 1956768 solver.cpp:57] Solver scaffolding done.\n",
      "W0914 09:12:31.330727 1956768 _caffe.cpp:139] DEPRECATION WARNING - deprecated use of Python interface\n",
      "W0914 09:12:31.330736 1956768 _caffe.cpp:140] Use this instead (with the named \"weights\" parameter):\n",
      "W0914 09:12:31.330740 1956768 _caffe.cpp:142] Net('/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_1_2.prototxt', 1, weights='/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_1_2.caffemodel')\n",
      "I0914 09:12:31.330806 1956768 net.cpp:53] Initializing net from parameters: \n",
      "state {\n",
      "  phase: TEST\n",
      "  level: 0\n",
      "}\n",
      "layer {\n",
      "  name: \"data0\"\n",
      "  type: \"Input\"\n",
      "  top: \"data0\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 10\n",
      "      dim: 10\n",
      "      dim: 120\n",
      "      dim: 120\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"data1\"\n",
      "  type: \"Input\"\n",
      "  top: \"data1\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 10\n",
      "      dim: 5\n",
      "      dim: 50\n",
      "      dim: 60\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"output\"\n",
      "  type: \"Crop\"\n",
      "  bottom: \"data0\"\n",
      "  bottom: \"data1\"\n",
      "  top: \"output\"\n",
      "  crop_param {\n",
      "    axis: 1\n",
      "    offset: 2\n",
      "  }\n",
      "}\n",
      "I0914 09:12:31.330840 1956768 layer_factory.hpp:77] Creating layer data0\n",
      "I0914 09:12:31.330848 1956768 net.cpp:86] Creating Layer data0\n",
      "I0914 09:12:31.330853 1956768 net.cpp:382] data0 -> data0\n",
      "I0914 09:12:31.330863 1956768 net.cpp:124] Setting up data0\n",
      "I0914 09:12:31.330868 1956768 net.cpp:131] Top shape: 10 10 120 120 (1440000)\n",
      "I0914 09:12:31.330873 1956768 net.cpp:139] Memory required for data: 5760000\n",
      "I0914 09:12:31.330878 1956768 layer_factory.hpp:77] Creating layer data1\n",
      "I0914 09:12:31.330883 1956768 net.cpp:86] Creating Layer data1\n",
      "I0914 09:12:31.330888 1956768 net.cpp:382] data1 -> data1\n",
      "I0914 09:12:31.330896 1956768 net.cpp:124] Setting up data1\n",
      "I0914 09:12:31.330900 1956768 net.cpp:131] Top shape: 10 5 50 60 (150000)\n",
      "I0914 09:12:31.330905 1956768 net.cpp:139] Memory required for data: 6360000\n",
      "I0914 09:12:31.330909 1956768 layer_factory.hpp:77] Creating layer output\n",
      "I0914 09:12:31.330914 1956768 net.cpp:86] Creating Layer output\n",
      "I0914 09:12:31.330919 1956768 net.cpp:408] output <- data0\n",
      "I0914 09:12:31.330922 1956768 net.cpp:408] output <- data1\n",
      "I0914 09:12:31.330929 1956768 net.cpp:382] output -> output\n",
      "I0914 09:12:31.330937 1956768 net.cpp:124] Setting up output\n",
      "I0914 09:12:31.330941 1956768 net.cpp:131] Top shape: 10 5 50 60 (150000)\n",
      "I0914 09:12:31.340920 1956768 net.cpp:139] Memory required for data: 6960000\n",
      "I0914 09:12:31.340931 1956768 net.cpp:202] output does not need backward computation.\n",
      "I0914 09:12:31.340935 1956768 net.cpp:202] data1 does not need backward computation.\n",
      "I0914 09:12:31.340939 1956768 net.cpp:202] data0 does not need backward computation.\n",
      "I0914 09:12:31.340942 1956768 net.cpp:244] This network produces output output\n",
      "I0914 09:12:31.340947 1956768 net.cpp:257] Network initialization done.\n",
      "I0914 09:12:31.659914 1956768 solver.cpp:45] Initializing solver from parameters: \n",
      "train_net: \"/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_1_1_2_4.prototxt\"\n",
      "base_lr: 0.01\n",
      "display: 1\n",
      "max_iter: 100000\n",
      "lr_policy: \"inv\"\n",
      "gamma: 0.0001\n",
      "power: 0.75\n",
      "momentum: 0.9\n",
      "weight_decay: 0.0005\n",
      "snapshot: 100000\n",
      "snapshot_prefix: \"/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_1_1_2_4\"\n",
      "I0914 09:12:31.660022 1956768 solver.cpp:92] Creating training net from train_net file: /home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_1_1_2_4.prototxt\n",
      "I0914 09:12:31.660095 1956768 net.cpp:53] Initializing net from parameters: \n",
      "state {\n",
      "  phase: TRAIN\n",
      "}\n",
      "layer {\n",
      "  name: \"data0\"\n",
      "  type: \"Input\"\n",
      "  top: \"data0\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 10\n",
      "      dim: 10\n",
      "      dim: 120\n",
      "      dim: 120\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"data1\"\n",
      "  type: \"Input\"\n",
      "  top: \"data1\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 10\n",
      "      dim: 5\n",
      "      dim: 50\n",
      "      dim: 60\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"output\"\n",
      "  type: \"Crop\"\n",
      "  bottom: \"data0\"\n",
      "  bottom: \"data1\"\n",
      "  top: \"output\"\n",
      "  crop_param {\n",
      "    axis: 1\n",
      "    offset: 1\n",
      "    offset: 2\n",
      "    offset: 4\n",
      "  }\n",
      "}\n",
      "I0914 09:12:31.660136 1956768 layer_factory.hpp:77] Creating layer data0\n",
      "I0914 09:12:31.660146 1956768 net.cpp:86] Creating Layer data0\n",
      "I0914 09:12:31.660152 1956768 net.cpp:382] data0 -> data0\n",
      "I0914 09:12:31.660166 1956768 net.cpp:124] Setting up data0\n",
      "I0914 09:12:31.660171 1956768 net.cpp:131] Top shape: 10 10 120 120 (1440000)\n",
      "I0914 09:12:31.660177 1956768 net.cpp:139] Memory required for data: 5760000\n",
      "I0914 09:12:31.660181 1956768 layer_factory.hpp:77] Creating layer data1\n",
      "I0914 09:12:31.660187 1956768 net.cpp:86] Creating Layer data1\n",
      "I0914 09:12:31.660192 1956768 net.cpp:382] data1 -> data1\n",
      "I0914 09:12:31.660199 1956768 net.cpp:124] Setting up data1\n",
      "I0914 09:12:31.660202 1956768 net.cpp:131] Top shape: 10 5 50 60 (150000)\n",
      "I0914 09:12:31.660207 1956768 net.cpp:139] Memory required for data: 6360000\n",
      "I0914 09:12:31.660210 1956768 layer_factory.hpp:77] Creating layer output\n",
      "I0914 09:12:31.660215 1956768 net.cpp:86] Creating Layer output\n",
      "I0914 09:12:31.660219 1956768 net.cpp:408] output <- data0\n",
      "I0914 09:12:31.660224 1956768 net.cpp:408] output <- data1\n",
      "I0914 09:12:31.660228 1956768 net.cpp:382] output -> output\n",
      "I0914 09:12:31.660239 1956768 net.cpp:124] Setting up output\n",
      "I0914 09:12:31.660243 1956768 net.cpp:131] Top shape: 10 5 50 60 (150000)\n",
      "I0914 09:12:31.660247 1956768 net.cpp:139] Memory required for data: 6960000\n",
      "I0914 09:12:31.660250 1956768 net.cpp:202] output does not need backward computation.\n",
      "I0914 09:12:31.660254 1956768 net.cpp:202] data1 does not need backward computation.\n",
      "I0914 09:12:31.660257 1956768 net.cpp:202] data0 does not need backward computation.\n",
      "I0914 09:12:31.660260 1956768 net.cpp:244] This network produces output output\n",
      "I0914 09:12:31.660265 1956768 net.cpp:257] Network initialization done.\n",
      "I0914 09:12:31.660279 1956768 solver.cpp:57] Solver scaffolding done.\n",
      "W0914 09:12:31.660377 1956768 _caffe.cpp:139] DEPRECATION WARNING - deprecated use of Python interface\n",
      "W0914 09:12:31.660384 1956768 _caffe.cpp:140] Use this instead (with the named \"weights\" parameter):\n",
      "W0914 09:12:31.660387 1956768 _caffe.cpp:142] Net('/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_1_1_2_4.prototxt', 1, weights='/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_1_1_2_4.caffemodel')\n",
      "I0914 09:12:31.660446 1956768 net.cpp:53] Initializing net from parameters: \n",
      "state {\n",
      "  phase: TEST\n",
      "  level: 0\n",
      "}\n",
      "layer {\n",
      "  name: \"data0\"\n",
      "  type: \"Input\"\n",
      "  top: \"data0\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 10\n",
      "      dim: 10\n",
      "      dim: 120\n",
      "      dim: 120\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"data1\"\n",
      "  type: \"Input\"\n",
      "  top: \"data1\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 10\n",
      "      dim: 5\n",
      "      dim: 50\n",
      "      dim: 60\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"output\"\n",
      "  type: \"Crop\"\n",
      "  bottom: \"data0\"\n",
      "  bottom: \"data1\"\n",
      "  top: \"output\"\n",
      "  crop_param {\n",
      "    axis: 1\n",
      "    offset: 1\n",
      "    offset: 2\n",
      "    offset: 4\n",
      "  }\n",
      "}\n",
      "I0914 09:12:31.660477 1956768 layer_factory.hpp:77] Creating layer data0\n",
      "I0914 09:12:31.660483 1956768 net.cpp:86] Creating Layer data0\n",
      "I0914 09:12:31.660488 1956768 net.cpp:382] data0 -> data0\n",
      "I0914 09:12:31.660496 1956768 net.cpp:124] Setting up data0\n",
      "I0914 09:12:31.660501 1956768 net.cpp:131] Top shape: 10 10 120 120 (1440000)\n",
      "I0914 09:12:31.660506 1956768 net.cpp:139] Memory required for data: 5760000\n",
      "I0914 09:12:31.660509 1956768 layer_factory.hpp:77] Creating layer data1\n",
      "I0914 09:12:31.660514 1956768 net.cpp:86] Creating Layer data1\n",
      "I0914 09:12:31.660521 1956768 net.cpp:382] data1 -> data1\n",
      "I0914 09:12:31.660526 1956768 net.cpp:124] Setting up data1\n",
      "I0914 09:12:31.660530 1956768 net.cpp:131] Top shape: 10 5 50 60 (150000)\n",
      "I0914 09:12:31.660534 1956768 net.cpp:139] Memory required for data: 6360000\n",
      "I0914 09:12:31.660537 1956768 layer_factory.hpp:77] Creating layer output\n",
      "I0914 09:12:31.660542 1956768 net.cpp:86] Creating Layer output\n",
      "I0914 09:12:31.660545 1956768 net.cpp:408] output <- data0\n",
      "I0914 09:12:31.660549 1956768 net.cpp:408] output <- data1\n",
      "I0914 09:12:31.660553 1956768 net.cpp:382] output -> output\n",
      "I0914 09:12:31.660562 1956768 net.cpp:124] Setting up output\n",
      "I0914 09:12:31.660564 1956768 net.cpp:131] Top shape: 10 5 50 60 (150000)\n",
      "I0914 09:12:31.660569 1956768 net.cpp:139] Memory required for data: 6960000\n",
      "I0914 09:12:31.660573 1956768 net.cpp:202] output does not need backward computation.\n",
      "I0914 09:12:31.660575 1956768 net.cpp:202] data1 does not need backward computation.\n",
      "I0914 09:12:31.660578 1956768 net.cpp:202] data0 does not need backward computation.\n",
      "I0914 09:12:31.660581 1956768 net.cpp:244] This network produces output output\n",
      "I0914 09:12:31.660585 1956768 net.cpp:257] Network initialization done.\n",
      "I0914 09:12:32.000172 1956768 solver.cpp:45] Initializing solver from parameters: \n",
      "train_net: \"/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_2_2_4.prototxt\"\n",
      "base_lr: 0.01\n",
      "display: 1\n",
      "max_iter: 100000\n",
      "lr_policy: \"inv\"\n",
      "gamma: 0.0001\n",
      "power: 0.75\n",
      "momentum: 0.9\n",
      "weight_decay: 0.0005\n",
      "snapshot: 100000\n",
      "snapshot_prefix: \"/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_2_2_4\"\n",
      "I0914 09:12:32.000275 1956768 solver.cpp:92] Creating training net from train_net file: /home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_2_2_4.prototxt\n",
      "I0914 09:12:32.000347 1956768 net.cpp:53] Initializing net from parameters: \n",
      "state {\n",
      "  phase: TRAIN\n",
      "}\n",
      "layer {\n",
      "  name: \"data0\"\n",
      "  type: \"Input\"\n",
      "  top: \"data0\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 10\n",
      "      dim: 10\n",
      "      dim: 120\n",
      "      dim: 120\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"data1\"\n",
      "  type: \"Input\"\n",
      "  top: \"data1\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 10\n",
      "      dim: 5\n",
      "      dim: 50\n",
      "      dim: 60\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"output\"\n",
      "  type: \"Crop\"\n",
      "  bottom: \"data0\"\n",
      "  bottom: \"data1\"\n",
      "  top: \"output\"\n",
      "  crop_param {\n",
      "    axis: 2\n",
      "    offset: 2\n",
      "    offset: 4\n",
      "  }\n",
      "}\n",
      "I0914 09:12:32.000389 1956768 layer_factory.hpp:77] Creating layer data0\n",
      "I0914 09:12:32.000399 1956768 net.cpp:86] Creating Layer data0\n",
      "I0914 09:12:32.000406 1956768 net.cpp:382] data0 -> data0\n",
      "I0914 09:12:32.000419 1956768 net.cpp:124] Setting up data0\n",
      "I0914 09:12:32.000423 1956768 net.cpp:131] Top shape: 10 10 120 120 (1440000)\n",
      "I0914 09:12:32.000430 1956768 net.cpp:139] Memory required for data: 5760000\n",
      "I0914 09:12:32.000433 1956768 layer_factory.hpp:77] Creating layer data1\n",
      "I0914 09:12:32.000439 1956768 net.cpp:86] Creating Layer data1\n",
      "I0914 09:12:32.000444 1956768 net.cpp:382] data1 -> data1\n",
      "I0914 09:12:32.000450 1956768 net.cpp:124] Setting up data1\n",
      "I0914 09:12:32.000454 1956768 net.cpp:131] Top shape: 10 5 50 60 (150000)\n",
      "I0914 09:12:32.000459 1956768 net.cpp:139] Memory required for data: 6360000\n",
      "I0914 09:12:32.000463 1956768 layer_factory.hpp:77] Creating layer output\n",
      "I0914 09:12:32.000468 1956768 net.cpp:86] Creating Layer output\n",
      "I0914 09:12:32.000473 1956768 net.cpp:408] output <- data0\n",
      "I0914 09:12:32.000476 1956768 net.cpp:408] output <- data1\n",
      "I0914 09:12:32.000481 1956768 net.cpp:382] output -> output\n",
      "I0914 09:12:32.000491 1956768 net.cpp:124] Setting up output\n",
      "I0914 09:12:32.000495 1956768 net.cpp:131] Top shape: 10 10 50 60 (300000)\n",
      "I0914 09:12:32.000500 1956768 net.cpp:139] Memory required for data: 7560000\n",
      "I0914 09:12:32.000504 1956768 net.cpp:202] output does not need backward computation.\n",
      "I0914 09:12:32.000507 1956768 net.cpp:202] data1 does not need backward computation.\n",
      "I0914 09:12:32.000510 1956768 net.cpp:202] data0 does not need backward computation.\n",
      "I0914 09:12:32.000514 1956768 net.cpp:244] This network produces output output\n",
      "I0914 09:12:32.000519 1956768 net.cpp:257] Network initialization done.\n",
      "I0914 09:12:32.000531 1956768 solver.cpp:57] Solver scaffolding done.\n",
      "W0914 09:12:32.000640 1956768 _caffe.cpp:139] DEPRECATION WARNING - deprecated use of Python interface\n",
      "W0914 09:12:32.000648 1956768 _caffe.cpp:140] Use this instead (with the named \"weights\" parameter):\n",
      "W0914 09:12:32.000651 1956768 _caffe.cpp:142] Net('/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_2_2_4.prototxt', 1, weights='/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_2_2_4.caffemodel')\n",
      "I0914 09:12:32.000711 1956768 net.cpp:53] Initializing net from parameters: \n",
      "state {\n",
      "  phase: TEST\n",
      "  level: 0\n",
      "}\n",
      "layer {\n",
      "  name: \"data0\"\n",
      "  type: \"Input\"\n",
      "  top: \"data0\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 10\n",
      "      dim: 10\n",
      "      dim: 120\n",
      "      dim: 120\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"data1\"\n",
      "  type: \"Input\"\n",
      "  top: \"data1\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 10\n",
      "      dim: 5\n",
      "      dim: 50\n",
      "      dim: 60\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"output\"\n",
      "  type: \"Crop\"\n",
      "  bottom: \"data0\"\n",
      "  bottom: \"data1\"\n",
      "  top: \"output\"\n",
      "  crop_param {\n",
      "    axis: 2\n",
      "    offset: 2\n",
      "    offset: 4\n",
      "  }\n",
      "}\n",
      "I0914 09:12:32.000746 1956768 layer_factory.hpp:77] Creating layer data0\n",
      "I0914 09:12:32.000751 1956768 net.cpp:86] Creating Layer data0\n",
      "I0914 09:12:32.000756 1956768 net.cpp:382] data0 -> data0\n",
      "I0914 09:12:32.000764 1956768 net.cpp:124] Setting up data0\n",
      "I0914 09:12:32.000768 1956768 net.cpp:131] Top shape: 10 10 120 120 (1440000)\n",
      "I0914 09:12:32.000773 1956768 net.cpp:139] Memory required for data: 5760000\n",
      "I0914 09:12:32.000777 1956768 layer_factory.hpp:77] Creating layer data1\n",
      "I0914 09:12:32.000782 1956768 net.cpp:86] Creating Layer data1\n",
      "I0914 09:12:32.000788 1956768 net.cpp:382] data1 -> data1\n",
      "I0914 09:12:32.000793 1956768 net.cpp:124] Setting up data1\n",
      "I0914 09:12:32.000797 1956768 net.cpp:131] Top shape: 10 5 50 60 (150000)\n",
      "I0914 09:12:32.000802 1956768 net.cpp:139] Memory required for data: 6360000\n",
      "I0914 09:12:32.000805 1956768 layer_factory.hpp:77] Creating layer output\n",
      "I0914 09:12:32.000809 1956768 net.cpp:86] Creating Layer output\n",
      "I0914 09:12:32.000813 1956768 net.cpp:408] output <- data0\n",
      "I0914 09:12:32.000818 1956768 net.cpp:408] output <- data1\n",
      "I0914 09:12:32.000821 1956768 net.cpp:382] output -> output\n",
      "I0914 09:12:32.000829 1956768 net.cpp:124] Setting up output\n",
      "I0914 09:12:32.000833 1956768 net.cpp:131] Top shape: 10 10 50 60 (300000)\n",
      "I0914 09:12:32.000837 1956768 net.cpp:139] Memory required for data: 7560000\n",
      "I0914 09:12:32.000840 1956768 net.cpp:202] output does not need backward computation.\n",
      "I0914 09:12:32.000844 1956768 net.cpp:202] data1 does not need backward computation.\n",
      "I0914 09:12:32.000847 1956768 net.cpp:202] data0 does not need backward computation.\n",
      "I0914 09:12:32.000850 1956768 net.cpp:244] This network produces output output\n",
      "I0914 09:12:32.000854 1956768 net.cpp:257] Network initialization done.\n",
      "I0914 09:12:32.305437 1956768 solver.cpp:45] Initializing solver from parameters: \n",
      "train_net: \"/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_120_120_5_50_60_1_2_4.prototxt\"\n",
      "base_lr: 0.01\n",
      "display: 1\n",
      "max_iter: 100000\n",
      "lr_policy: \"inv\"\n",
      "gamma: 0.0001\n",
      "power: 0.75\n",
      "momentum: 0.9\n",
      "weight_decay: 0.0005\n",
      "snapshot: 100000\n",
      "snapshot_prefix: \"/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_120_120_5_50_60_1_2_4\"\n",
      "I0914 09:12:32.305541 1956768 solver.cpp:92] Creating training net from train_net file: /home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_120_120_5_50_60_1_2_4.prototxt\n",
      "I0914 09:12:32.305613 1956768 net.cpp:53] Initializing net from parameters: \n",
      "state {\n",
      "  phase: TRAIN\n",
      "}\n",
      "layer {\n",
      "  name: \"data0\"\n",
      "  type: \"Input\"\n",
      "  top: \"data0\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 10\n",
      "      dim: 120\n",
      "      dim: 120\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"data1\"\n",
      "  type: \"Input\"\n",
      "  top: \"data1\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 5\n",
      "      dim: 50\n",
      "      dim: 60\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"output\"\n",
      "  type: \"Crop\"\n",
      "  bottom: \"data0\"\n",
      "  bottom: \"data1\"\n",
      "  top: \"output\"\n",
      "  crop_param {\n",
      "    axis: 1\n",
      "    offset: 2\n",
      "    offset: 4\n",
      "  }\n",
      "}\n",
      "I0914 09:12:32.305655 1956768 layer_factory.hpp:77] Creating layer data0\n",
      "I0914 09:12:32.305665 1956768 net.cpp:86] Creating Layer data0\n",
      "I0914 09:12:32.305670 1956768 net.cpp:382] data0 -> data0\n",
      "I0914 09:12:32.305683 1956768 net.cpp:124] Setting up data0\n",
      "I0914 09:12:32.305688 1956768 net.cpp:131] Top shape: 10 120 120 (144000)\n",
      "I0914 09:12:32.305694 1956768 net.cpp:139] Memory required for data: 576000\n",
      "I0914 09:12:32.305697 1956768 layer_factory.hpp:77] Creating layer data1\n",
      "I0914 09:12:32.305703 1956768 net.cpp:86] Creating Layer data1\n",
      "I0914 09:12:32.305707 1956768 net.cpp:382] data1 -> data1\n",
      "I0914 09:12:32.305714 1956768 net.cpp:124] Setting up data1\n",
      "I0914 09:12:32.305717 1956768 net.cpp:131] Top shape: 5 50 60 (15000)\n",
      "I0914 09:12:32.305721 1956768 net.cpp:139] Memory required for data: 636000\n",
      "I0914 09:12:32.305724 1956768 layer_factory.hpp:77] Creating layer output\n",
      "I0914 09:12:32.305730 1956768 net.cpp:86] Creating Layer output\n",
      "I0914 09:12:32.305734 1956768 net.cpp:408] output <- data0\n",
      "I0914 09:12:32.305738 1956768 net.cpp:408] output <- data1\n",
      "I0914 09:12:32.305743 1956768 net.cpp:382] output -> output\n",
      "I0914 09:12:32.305752 1956768 net.cpp:124] Setting up output\n",
      "I0914 09:12:32.305756 1956768 net.cpp:131] Top shape: 10 50 60 (30000)\n",
      "I0914 09:12:32.305760 1956768 net.cpp:139] Memory required for data: 756000\n",
      "I0914 09:12:32.305764 1956768 net.cpp:202] output does not need backward computation.\n",
      "I0914 09:12:32.305768 1956768 net.cpp:202] data1 does not need backward computation.\n",
      "I0914 09:12:32.305770 1956768 net.cpp:202] data0 does not need backward computation.\n",
      "I0914 09:12:32.305774 1956768 net.cpp:244] This network produces output output\n",
      "I0914 09:12:32.305779 1956768 net.cpp:257] Network initialization done.\n",
      "I0914 09:12:32.305790 1956768 solver.cpp:57] Solver scaffolding done.\n",
      "W0914 09:12:32.305888 1956768 _caffe.cpp:139] DEPRECATION WARNING - deprecated use of Python interface\n",
      "W0914 09:12:32.305896 1956768 _caffe.cpp:140] Use this instead (with the named \"weights\" parameter):\n",
      "W0914 09:12:32.305899 1956768 _caffe.cpp:142] Net('/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_120_120_5_50_60_1_2_4.prototxt', 1, weights='/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_120_120_5_50_60_1_2_4.caffemodel')\n",
      "I0914 09:12:32.305958 1956768 net.cpp:53] Initializing net from parameters: \n",
      "state {\n",
      "  phase: TEST\n",
      "  level: 0\n",
      "}\n",
      "layer {\n",
      "  name: \"data0\"\n",
      "  type: \"Input\"\n",
      "  top: \"data0\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 10\n",
      "      dim: 120\n",
      "      dim: 120\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"data1\"\n",
      "  type: \"Input\"\n",
      "  top: \"data1\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 5\n",
      "      dim: 50\n",
      "      dim: 60\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"output\"\n",
      "  type: \"Crop\"\n",
      "  bottom: \"data0\"\n",
      "  bottom: \"data1\"\n",
      "  top: \"output\"\n",
      "  crop_param {\n",
      "    axis: 1\n",
      "    offset: 2\n",
      "    offset: 4\n",
      "  }\n",
      "}\n",
      "I0914 09:12:32.305989 1956768 layer_factory.hpp:77] Creating layer data0\n",
      "I0914 09:12:32.305995 1956768 net.cpp:86] Creating Layer data0\n",
      "I0914 09:12:32.306000 1956768 net.cpp:382] data0 -> data0\n",
      "I0914 09:12:32.306008 1956768 net.cpp:124] Setting up data0\n",
      "I0914 09:12:32.306011 1956768 net.cpp:131] Top shape: 10 120 120 (144000)\n",
      "I0914 09:12:32.306017 1956768 net.cpp:139] Memory required for data: 576000\n",
      "I0914 09:12:32.306020 1956768 layer_factory.hpp:77] Creating layer data1\n",
      "I0914 09:12:32.306025 1956768 net.cpp:86] Creating Layer data1\n",
      "I0914 09:12:32.306030 1956768 net.cpp:382] data1 -> data1\n",
      "I0914 09:12:32.306035 1956768 net.cpp:124] Setting up data1\n",
      "I0914 09:12:32.306041 1956768 net.cpp:131] Top shape: 5 50 60 (15000)\n",
      "I0914 09:12:32.306046 1956768 net.cpp:139] Memory required for data: 636000\n",
      "I0914 09:12:32.306048 1956768 layer_factory.hpp:77] Creating layer output\n",
      "I0914 09:12:32.306053 1956768 net.cpp:86] Creating Layer output\n",
      "I0914 09:12:32.306056 1956768 net.cpp:408] output <- data0\n",
      "I0914 09:12:32.306061 1956768 net.cpp:408] output <- data1\n",
      "I0914 09:12:32.306064 1956768 net.cpp:382] output -> output\n",
      "I0914 09:12:32.306072 1956768 net.cpp:124] Setting up output\n",
      "I0914 09:12:32.306077 1956768 net.cpp:131] Top shape: 10 50 60 (30000)\n",
      "I0914 09:12:32.306080 1956768 net.cpp:139] Memory required for data: 756000\n",
      "I0914 09:12:32.306083 1956768 net.cpp:202] output does not need backward computation.\n",
      "I0914 09:12:32.306087 1956768 net.cpp:202] data1 does not need backward computation.\n",
      "I0914 09:12:32.306090 1956768 net.cpp:202] data0 does not need backward computation.\n",
      "I0914 09:12:32.306093 1956768 net.cpp:244] This network produces output output\n",
      "I0914 09:12:32.306097 1956768 net.cpp:257] Network initialization done.\n",
      "I0914 09:12:32.559347 1956768 solver.cpp:45] Initializing solver from parameters: \n",
      "train_net: \"/home/ai/.tvm_test_data/caffe_test/Crop/Crop_120_120_50_60_0_2_4.prototxt\"\n",
      "base_lr: 0.01\n",
      "display: 1\n",
      "max_iter: 100000\n",
      "lr_policy: \"inv\"\n",
      "gamma: 0.0001\n",
      "power: 0.75\n",
      "momentum: 0.9\n",
      "weight_decay: 0.0005\n",
      "snapshot: 100000\n",
      "snapshot_prefix: \"/home/ai/.tvm_test_data/caffe_test/Crop/Crop_120_120_50_60_0_2_4\"\n",
      "I0914 09:12:32.559437 1956768 solver.cpp:92] Creating training net from train_net file: /home/ai/.tvm_test_data/caffe_test/Crop/Crop_120_120_50_60_0_2_4.prototxt\n",
      "I0914 09:12:32.559511 1956768 net.cpp:53] Initializing net from parameters: \n",
      "state {\n",
      "  phase: TRAIN\n",
      "}\n",
      "layer {\n",
      "  name: \"data0\"\n",
      "  type: \"Input\"\n",
      "  top: \"data0\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 120\n",
      "      dim: 120\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"data1\"\n",
      "  type: \"Input\"\n",
      "  top: \"data1\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 50\n",
      "      dim: 60\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"output\"\n",
      "  type: \"Crop\"\n",
      "  bottom: \"data0\"\n",
      "  bottom: \"data1\"\n",
      "  top: \"output\"\n",
      "  crop_param {\n",
      "    axis: 0\n",
      "    offset: 2\n",
      "    offset: 4\n",
      "  }\n",
      "}\n",
      "I0914 09:12:32.559547 1956768 layer_factory.hpp:77] Creating layer data0\n",
      "I0914 09:12:32.559556 1956768 net.cpp:86] Creating Layer data0\n",
      "I0914 09:12:32.559561 1956768 net.cpp:382] data0 -> data0\n",
      "I0914 09:12:32.559571 1956768 net.cpp:124] Setting up data0\n",
      "I0914 09:12:32.559574 1956768 net.cpp:131] Top shape: 120 120 (14400)\n",
      "I0914 09:12:32.559579 1956768 net.cpp:139] Memory required for data: 57600\n",
      "I0914 09:12:32.559583 1956768 layer_factory.hpp:77] Creating layer data1\n",
      "I0914 09:12:32.559587 1956768 net.cpp:86] Creating Layer data1\n",
      "I0914 09:12:32.559592 1956768 net.cpp:382] data1 -> data1\n",
      "I0914 09:12:32.559598 1956768 net.cpp:124] Setting up data1\n",
      "I0914 09:12:32.559602 1956768 net.cpp:131] Top shape: 50 60 (3000)\n",
      "I0914 09:12:32.559605 1956768 net.cpp:139] Memory required for data: 69600\n",
      "I0914 09:12:32.559608 1956768 layer_factory.hpp:77] Creating layer output\n",
      "I0914 09:12:32.559613 1956768 net.cpp:86] Creating Layer output\n",
      "I0914 09:12:32.559617 1956768 net.cpp:408] output <- data0\n",
      "I0914 09:12:32.559621 1956768 net.cpp:408] output <- data1\n",
      "I0914 09:12:32.559625 1956768 net.cpp:382] output -> output\n",
      "I0914 09:12:32.559634 1956768 net.cpp:124] Setting up output\n",
      "I0914 09:12:32.559638 1956768 net.cpp:131] Top shape: 50 60 (3000)\n",
      "I0914 09:12:32.559641 1956768 net.cpp:139] Memory required for data: 81600\n",
      "I0914 09:12:32.559644 1956768 net.cpp:202] output does not need backward computation.\n",
      "I0914 09:12:32.559648 1956768 net.cpp:202] data1 does not need backward computation.\n",
      "I0914 09:12:32.559651 1956768 net.cpp:202] data0 does not need backward computation.\n",
      "I0914 09:12:32.559654 1956768 net.cpp:244] This network produces output output\n",
      "I0914 09:12:32.559659 1956768 net.cpp:257] Network initialization done.\n",
      "I0914 09:12:32.559669 1956768 solver.cpp:57] Solver scaffolding done.\n",
      "W0914 09:12:32.559760 1956768 _caffe.cpp:139] DEPRECATION WARNING - deprecated use of Python interface\n",
      "W0914 09:12:32.559768 1956768 _caffe.cpp:140] Use this instead (with the named \"weights\" parameter):\n",
      "W0914 09:12:32.559772 1956768 _caffe.cpp:142] Net('/home/ai/.tvm_test_data/caffe_test/Crop/Crop_120_120_50_60_0_2_4.prototxt', 1, weights='/home/ai/.tvm_test_data/caffe_test/Crop/Crop_120_120_50_60_0_2_4.caffemodel')\n",
      "I0914 09:12:32.559827 1956768 net.cpp:53] Initializing net from parameters: \n",
      "state {\n",
      "  phase: TEST\n",
      "  level: 0\n",
      "}\n",
      "layer {\n",
      "  name: \"data0\"\n",
      "  type: \"Input\"\n",
      "  top: \"data0\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 120\n",
      "      dim: 120\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"data1\"\n",
      "  type: \"Input\"\n",
      "  top: \"data1\"\n",
      "  input_param {\n",
      "    shape {\n",
      "      dim: 50\n",
      "      dim: 60\n",
      "    }\n",
      "  }\n",
      "}\n",
      "layer {\n",
      "  name: \"output\"\n",
      "  type: \"Crop\"\n",
      "  bottom: \"data0\"\n",
      "  bottom: \"data1\"\n",
      "  top: \"output\"\n",
      "  crop_param {\n",
      "    axis: 0\n",
      "    offset: 2\n",
      "    offset: 4\n",
      "  }\n",
      "}\n",
      "I0914 09:12:32.559859 1956768 layer_factory.hpp:77] Creating layer data0\n",
      "I0914 09:12:32.559864 1956768 net.cpp:86] Creating Layer data0\n",
      "I0914 09:12:32.559870 1956768 net.cpp:382] data0 -> data0\n",
      "I0914 09:12:32.559876 1956768 net.cpp:124] Setting up data0\n",
      "I0914 09:12:32.559880 1956768 net.cpp:131] Top shape: 120 120 (14400)\n",
      "I0914 09:12:32.559885 1956768 net.cpp:139] Memory required for data: 57600\n",
      "I0914 09:12:32.559888 1956768 layer_factory.hpp:77] Creating layer data1\n",
      "I0914 09:12:32.559892 1956768 net.cpp:86] Creating Layer data1\n",
      "I0914 09:12:32.559896 1956768 net.cpp:382] data1 -> data1\n",
      "I0914 09:12:32.559901 1956768 net.cpp:124] Setting up data1\n",
      "I0914 09:12:32.559906 1956768 net.cpp:131] Top shape: 50 60 (3000)\n",
      "I0914 09:12:32.559911 1956768 net.cpp:139] Memory required for data: 69600\n",
      "I0914 09:12:32.559914 1956768 layer_factory.hpp:77] Creating layer output\n",
      "I0914 09:12:32.559918 1956768 net.cpp:86] Creating Layer output\n",
      "I0914 09:12:32.559922 1956768 net.cpp:408] output <- data0\n",
      "I0914 09:12:32.559926 1956768 net.cpp:408] output <- data1\n",
      "I0914 09:12:32.559931 1956768 net.cpp:382] output -> output\n",
      "I0914 09:12:32.559937 1956768 net.cpp:124] Setting up output\n",
      "I0914 09:12:32.559942 1956768 net.cpp:131] Top shape: 50 60 (3000)\n",
      "I0914 09:12:32.559944 1956768 net.cpp:139] Memory required for data: 81600\n",
      "I0914 09:12:32.559947 1956768 net.cpp:202] output does not need backward computation.\n",
      "I0914 09:12:32.559952 1956768 net.cpp:202] data1 does not need backward computation.\n",
      "I0914 09:12:32.559954 1956768 net.cpp:202] data0 does not need backward computation.\n",
      "I0914 09:12:32.559957 1956768 net.cpp:244] This network produces output output\n",
      "I0914 09:12:32.559962 1956768 net.cpp:257] Network initialization done.\n"
     ]
    }
   ],
   "source": [
    "test_forward_Crop()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## caffe Deconvolution"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _test_deconvolution(data, **kwargs):\n",
    "    \"\"\"One iteration of Deconvolution\"\"\"\n",
    "    _test_op(data, L.Deconvolution, \"Deconvolution\", **kwargs)\n",
    "\n",
    "\n",
    "def test_forward_Deconvolution():\n",
    "    \"\"\"Deconvolution\"\"\"\n",
    "    # (input shape, convolution_param) pairs: symmetric and asymmetric\n",
    "    # padding, rectangular kernel/stride, and grouped (depthwise) cases\n",
    "    cases = [\n",
    "        (\n",
    "            (1, 16, 32, 32),\n",
    "            dict(num_output=20, bias_term=True, pad=0, kernel_size=3, stride=2, dilation=1),\n",
    "        ),\n",
    "        (\n",
    "            (1, 16, 32, 32),\n",
    "            dict(num_output=20, bias_term=False, pad=[1, 2], kernel_size=3, stride=2, dilation=1),\n",
    "        ),\n",
    "        (\n",
    "            (1, 16, 32, 32),\n",
    "            dict(\n",
    "                num_output=20,\n",
    "                bias_term=True,\n",
    "                pad_h=1,\n",
    "                pad_w=2,\n",
    "                kernel_h=3,\n",
    "                kernel_w=5,\n",
    "                stride_h=2,\n",
    "                stride_w=1,\n",
    "                dilation=1,\n",
    "            ),\n",
    "        ),\n",
    "        (\n",
    "            (1, 16, 32, 32),\n",
    "            dict(num_output=16, bias_term=False, pad=0, kernel_size=2, stride=2, dilation=1, group=16),\n",
    "        ),\n",
    "        (\n",
    "            (1, 100, 32, 32),\n",
    "            dict(num_output=100, bias_term=False, pad=0, kernel_size=2, stride=2, dilation=1, group=100),\n",
    "        ),\n",
    "    ]\n",
    "    for shape, conv_param in cases:\n",
    "        # every case uses xavier-filled weight/bias blobs\n",
    "        conv_param = dict(\n",
    "            conv_param, weight_filler=dict(type=\"xavier\"), bias_filler=dict(type=\"xavier\")\n",
    "        )\n",
    "        _test_deconvolution(np.random.rand(*shape).astype(np.float32), convolution_param=conv_param)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## caffe Dropout"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _test_dropout(data, **kwargs):\n",
    "    \"\"\"Run a single Dropout layer through the caffe-vs-relay comparison.\"\"\"\n",
    "    _test_op(data, L.Dropout, \"Dropout\", **kwargs)\n",
    "\n",
    "\n",
    "def test_forward_Dropout():\n",
    "    \"\"\"Dropout\"\"\"\n",
    "    inputs = np.random.rand(1, 3, 10, 10).astype(np.float32)\n",
    "    # default ratio, then an explicit non-default ratio\n",
    "    for extra in ({}, {\"dropout_ratio\": 0.7}):\n",
    "        _test_dropout(inputs, **extra)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## caffe Eltwise"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _test_eltwise(data_list, **kwargs):\n",
    "    \"\"\"One iteration of Eltwise\"\"\"\n",
    "    _test_op(data_list, L.Eltwise, \"Eltwise\", **kwargs)\n",
    "\n",
    "\n",
    "def test_forward_Eltwise():\n",
    "    \"\"\"Eltwise\"\"\"\n",
    "\n",
    "    def rand_inputs(count):\n",
    "        # `count` equally-shaped operands for the element-wise layer\n",
    "        return [np.random.rand(1, 3, 10, 11).astype(np.float32) for _ in range(count)]\n",
    "\n",
    "    # operation codes follow caffe's EltwiseOp enum (0=PROD, 1=SUM, 2=MAX)\n",
    "    _test_eltwise(rand_inputs(2), operation=0)\n",
    "    _test_eltwise(rand_inputs(2), operation=1)\n",
    "    _test_eltwise(rand_inputs(2), operation=2)\n",
    "    _test_eltwise(rand_inputs(2), operation=1, coeff=[0.5, 1])\n",
    "    _test_eltwise(rand_inputs(3), operation=0)\n",
    "    _test_eltwise(rand_inputs(4), operation=1)\n",
    "    _test_eltwise(rand_inputs(5), operation=2)\n",
    "    _test_eltwise(rand_inputs(6), operation=1, coeff=[0.5, 1, 0.2, 1.8, 3.1, 0.1])\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## caffe Flatten"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _test_flatten(data, axis=1):\n",
    "    \"\"\"One iteration of Flatten\"\"\"\n",
    "    _test_op(data, L.Flatten, \"Flatten\", axis=axis)\n",
    "\n",
    "\n",
    "def test_forward_Flatten():\n",
    "    \"\"\"Flatten\"\"\"\n",
    "    blob = np.random.rand(1, 3, 10, 10).astype(np.float32)\n",
    "    _test_flatten(blob)  # implicit default axis\n",
    "    _test_flatten(blob, axis=1)  # explicit axis equal to the default"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## caffe InnerProduct"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _test_inner_product(data, **kwargs):\n",
    "    \"\"\"One iteration of InnerProduct\"\"\"\n",
    "    _test_op(data, L.InnerProduct, \"InnerProduct\", **kwargs)\n",
    "\n",
    "\n",
    "def test_forward_InnerProduct():\n",
    "    \"\"\"InnerProduct\"\"\"\n",
    "    # use float32 like every other op test in this notebook; the bare\n",
    "    # np.random.rand() here previously produced a float64 array, which was\n",
    "    # inconsistent with the rest of the suite\n",
    "    data = np.random.rand(1, 3, 10, 10).astype(np.float32)\n",
    "    # no bias\n",
    "    _test_inner_product(data, num_output=20, bias_term=False, weight_filler=dict(type=\"xavier\"))\n",
    "    # with bias\n",
    "    _test_inner_product(\n",
    "        data,\n",
    "        num_output=20,\n",
    "        bias_term=True,\n",
    "        weight_filler=dict(type=\"xavier\"),\n",
    "        bias_filler=dict(type=\"xavier\"),\n",
    "    )\n",
    "    # 2-D input\n",
    "    _test_inner_product(\n",
    "        np.random.rand(20, 10).astype(np.float32),\n",
    "        num_output=30,\n",
    "        bias_term=True,\n",
    "        weight_filler=dict(type=\"xavier\"),\n",
    "        bias_filler=dict(type=\"xavier\"),\n",
    "    )\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## caffe LRN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _test_lrn(data, local_size=5, alpha=1.0, beta=0.75, k=1.0):\n",
    "    \"\"\"One iteration of LRN\"\"\"\n",
    "    _test_op(data, L.LRN, \"LRN\", local_size=local_size, alpha=alpha, beta=beta, k=k)\n",
    "\n",
    "\n",
    "def test_forward_LRN():\n",
    "    \"\"\"LRN\"\"\"\n",
    "    data = np.random.rand(1, 3, 10, 10).astype(np.float32)\n",
    "    # defaults first, then progressively override local_size / alpha / beta / k\n",
    "    _test_lrn(data)\n",
    "    _test_lrn(data, local_size=3)\n",
    "    _test_lrn(data, local_size=3, alpha=2.0)\n",
    "    _test_lrn(data, local_size=3, alpha=2.0, beta=0.5)\n",
    "    _test_lrn(data, local_size=3, alpha=2.0, beta=0.5, k=2.0)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## caffe Permute"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _test_permute(data, **kwargs):\n",
    "    \"\"\"One iteration of Permute.\"\"\"\n",
    "    _test_op(data, L.Permute, \"Permute\", **kwargs)\n",
    "\n",
    "\n",
    "def test_forward_Permute():\n",
    "    \"\"\"Permute\"\"\"\n",
    "    data = np.random.rand(2, 3, 4).astype(np.float32)\n",
    "    # all six permutations of a rank-3 axis order\n",
    "    for order in ([0, 1, 2], [0, 2, 1], [1, 0, 2], [1, 2, 0], [2, 0, 1], [2, 1, 0]):\n",
    "        _test_permute(data, permute_param={\"order\": order})"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## caffe Pooling"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _test_pooling(data, **kwargs):\n",
    "    \"\"\"One iteration of Pooling.\"\"\"\n",
    "    _test_op(data, L.Pooling, \"Pooling\", **kwargs)\n",
    "\n",
    "\n",
    "def test_forward_Pooling():\n",
    "    \"\"\"Pooling\"\"\"\n",
    "    data = np.random.rand(1, 3, 10, 10).astype(np.float32)\n",
    "    # MAX Pooling: square kernel, rectangular kernel/stride/pad, and global\n",
    "    _test_pooling(data, kernel_size=2, stride=2, pad=0, pool=P.Pooling.MAX)\n",
    "    _test_pooling(\n",
    "        data, kernel_h=2, kernel_w=3, stride_h=2, stride_w=1, pad_h=1, pad_w=2, pool=P.Pooling.MAX\n",
    "    )\n",
    "    _test_pooling(data, pool=P.Pooling.MAX, global_pooling=True)\n",
    "\n",
    "    # AVE Pooling: same shape variants as the MAX cases above\n",
    "    _test_pooling(data, kernel_size=2, stride=2, pad=0, pool=P.Pooling.AVE)\n",
    "    _test_pooling(\n",
    "        data, kernel_h=2, kernel_w=3, stride_h=2, stride_w=1, pad_h=1, pad_w=2, pool=P.Pooling.AVE\n",
    "    )\n",
    "    _test_pooling(data, pool=P.Pooling.AVE, global_pooling=True)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## caffe Power"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _test_power(data, **kwargs):\n",
    "    \"\"\"One iteration of Power.\"\"\"\n",
    "    _test_op(data, L.Power, \"Power\", **kwargs)\n",
    "\n",
    "\n",
    "def test_forward_Power():\n",
    "    \"\"\"Power\"\"\"\n",
    "    data = np.random.rand(1, 3, 10, 10).astype(np.float32)\n",
    "    # (power, scale, shift) combinations, ending with the identity transform\n",
    "    for power, scale, shift in [\n",
    "        (0.37, 0.83, -2.4),\n",
    "        (0.37, 0.83, 0.0),\n",
    "        (0.0, 0.83, -2.4),\n",
    "        (1.0, 0.83, -2.4),\n",
    "        (2.0, 0.34, -2.4),\n",
    "        (1.0, 1.0, 0.0),\n",
    "    ]:\n",
    "        _test_power(data, power_param={\"power\": power, \"scale\": scale, \"shift\": shift})"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## caffe PReLU"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _test_prelu(data, **kwargs):\n",
    "    \"\"\"One iteration of PReLU.\"\"\"\n",
    "    _test_op(data, L.PReLU, \"PReLU\", **kwargs)\n",
    "\n",
    "\n",
    "def test_forward_PReLU():\n",
    "    \"\"\"PReLU\"\"\"\n",
    "    nchw = np.random.rand(1, 3, 10, 10).astype(np.float32)\n",
    "    # explicit constant slope filler, default filler, and a 2-D input\n",
    "    _test_prelu(nchw, filler=dict(type=\"constant\", value=0.5))\n",
    "    _test_prelu(nchw)\n",
    "    _test_prelu(np.random.rand(10, 20).astype(np.float32))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## caffe ReLU"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _test_relu(data, **kwargs):\n",
    "    \"\"\"One iteration of ReLU.\"\"\"\n",
    "    _test_op(data, L.ReLU, \"ReLU\", **kwargs)\n",
    "\n",
    "\n",
    "def test_forward_ReLU():\n",
    "    \"\"\"ReLU\"\"\"\n",
    "    # a 4-D blob followed by a 2-D blob\n",
    "    for shape in [(1, 3, 10, 10), (10, 20)]:\n",
    "        _test_relu(np.random.rand(*shape).astype(np.float32))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## caffe Reshape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _test_reshape(data, **kwargs):\n",
    "    \"\"\"One iteration of Reshape.\"\"\"\n",
    "    _test_op(data, L.Reshape, \"Reshape\", **kwargs)\n",
    "\n",
    "\n",
    "def test_forward_Reshape():\n",
    "    \"\"\"Reshape\"\"\"\n",
    "    data = np.random.rand(1, 8, 6).astype(np.float32)\n",
    "    # plain target shapes, including 0 (copy dim) and -1 (infer dim)\n",
    "    for dims in ([4, 3, 4], [2, 0, 3], [2, 0, -1], [0, -1]):\n",
    "        _test_reshape(data, reshape_param={\"shape\": {\"dim\": dims}})\n",
    "\n",
    "    # non-default start axis (positive and negative)\n",
    "    _test_reshape(data, reshape_param={\"shape\": {\"dim\": [2, 3]}, \"axis\": 2})\n",
    "    _test_reshape(data, reshape_param={\"shape\": {\"dim\": [4, 3, 4]}, \"axis\": 1})\n",
    "    _test_reshape(data, reshape_param={\"shape\": {\"dim\": [4, 3, 4]}, \"axis\": -3})\n",
    "\n",
    "    # reshape only a sub-range of axes via num_axes\n",
    "    _test_reshape(data, reshape_param={\"shape\": {\"dim\": [2, 4]}, \"axis\": 1, \"num_axes\": 1})\n",
    "    _test_reshape(data, reshape_param={\"shape\": {\"dim\": [3, 16]}, \"axis\": 1, \"num_axes\": 2})\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## caffe Scale"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _test_scale(data, **kwargs):\n",
    "    \"\"\"One iteration of Scale.\"\"\"\n",
    "    _test_op(data, L.Scale, \"Scale\", **kwargs)\n",
    "\n",
    "\n",
    "def test_forward_Scale():\n",
    "    \"\"\"Scale\"\"\"\n",
    "    blob = np.random.rand(1, 3, 10, 10).astype(np.float32)\n",
    "    # without, then with, a learned bias term\n",
    "    _test_scale(blob, filler=dict(type=\"xavier\"))\n",
    "    _test_scale(blob, filler=dict(type=\"xavier\"), bias_term=True, bias_filler=dict(type=\"xavier\"))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## caffe Sigmoid"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _test_sigmoid(data, **kwargs):\n",
    "    \"\"\"One iteration of Sigmoid.\"\"\"\n",
    "    _test_op(data, L.Sigmoid, \"Sigmoid\", **kwargs)\n",
    "\n",
    "\n",
    "def test_forward_Sigmoid():\n",
    "    \"\"\"Sigmoid\"\"\"\n",
    "    blob = np.random.rand(1, 3, 10, 10).astype(np.float32)\n",
    "    _test_sigmoid(blob)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## caffe Slice"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _test_slice(data, **kwargs):\n",
    "    \"\"\"One iteration of Slice\"\"\"\n",
    "    _test_op(data, L.Slice, \"Slice\", **kwargs)\n",
    "\n",
    "\n",
    "def test_forward_Slice():\n",
    "    \"\"\"Slice\"\"\"\n",
    "    blob = np.random.rand(1, 3, 10, 10).astype(np.float32)\n",
    "    # positive / negative axes with explicit slice points\n",
    "    for ntop, param in [\n",
    "        (2, dict(axis=1, slice_point=[1])),\n",
    "        (2, dict(axis=-1, slice_point=[1])),\n",
    "        (3, dict(axis=2, slice_point=[1, 6])),\n",
    "    ]:\n",
    "        _test_slice(blob, ntop=ntop, slice_param=param)\n",
    "    # no slice_param: split evenly into ntop outputs along the default axis\n",
    "    _test_slice(blob, ntop=3)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## caffe Softmax"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _test_softmax(data, **kwargs):\n",
    "    \"\"\"One iteration of Softmax\"\"\"\n",
    "    _test_op(data, L.Softmax, \"Softmax\", **kwargs)\n",
    "\n",
    "\n",
    "def test_forward_Softmax():\n",
    "    \"\"\"Softmax\"\"\"\n",
    "    # default axis on a 4-D blob\n",
    "    _test_softmax(np.random.rand(1, 3, 10, 10).astype(np.float32))\n",
    "    # explicit axes on blobs of various ranks\n",
    "    for shape, axis in [((1, 3, 10, 10), 2), ((10, 10), 0), ((2, 10, 10), 1)]:\n",
    "        _test_softmax(np.random.rand(*shape).astype(np.float32), axis=axis)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## caffe TanH"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _test_tanh(data, **kwargs):\n",
    "    \"\"\"One iteration of TanH\"\"\"\n",
    "    _test_op(data, L.TanH, \"TanH\", **kwargs)\n",
    "\n",
    "\n",
    "def test_forward_TanH():\n",
    "    \"\"\"TanH\"\"\"\n",
    "    # input ranks 4 down to 1\n",
    "    for shape in [(1, 3, 10, 10), (3, 10, 10), (10, 10), (10,)]:\n",
    "        _test_tanh(np.random.rand(*shape).astype(np.float32))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## caffe Reduction"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _test_reduction(data, **kwargs):\n",
    "    \"\"\"One iteration of Reduction\"\"\"\n",
    "    _test_op(data, L.Reduction, \"Reduction\", **kwargs)\n",
    "\n",
    "\n",
    "def test_forward_Reduction():\n",
    "    \"\"\"Reduction\"\"\"\n",
    "    reduction_op = {\"SUM\": 1, \"ASUM\": 2, \"SUMSQ\": 3, \"MEAN\": 4}\n",
    "    # (input shape, operation name, extra layer kwargs) for each case\n",
    "    cases = [\n",
    "        ((10,), \"SUM\", dict(axis=0)),\n",
    "        ((10, 20, 30, 40), \"SUM\", dict(axis=3)),\n",
    "        ((10, 20, 30, 40), \"SUM\", dict(axis=1)),\n",
    "        ((10,), \"SUM\", dict(axis=0, coeff=0.5)),\n",
    "        ((10, 20, 30, 40), \"SUM\", dict(axis=3, coeff=5.0)),\n",
    "        ((10,), \"ASUM\", dict()),\n",
    "        ((10, 20), \"ASUM\", dict(axis=1)),\n",
    "        ((10, 20, 30, 40), \"ASUM\", dict(axis=3)),\n",
    "        ((10,), \"ASUM\", dict(axis=0, coeff=0.0)),\n",
    "        ((10, 20, 30), \"ASUM\", dict(axis=2, coeff=7.0)),\n",
    "        ((10, 20, 30, 40, 10), \"ASUM\", dict(axis=3, coeff=1.0)),\n",
    "        ((10,), \"SUMSQ\", dict(axis=0)),\n",
    "        ((10, 20, 30, 40), \"SUMSQ\", dict(axis=3)),\n",
    "        ((10,), \"SUMSQ\", dict(axis=0, coeff=0.0)),\n",
    "        ((10, 20, 30, 40, 50), \"SUMSQ\", dict(axis=4, coeff=2.0)),\n",
    "        ((10,), \"MEAN\", dict(axis=0)),\n",
    "        ((10, 20, 30, 40), \"MEAN\", dict(axis=3)),\n",
    "        ((10,), \"MEAN\", dict(axis=0, coeff=0.0)),\n",
    "        ((10, 20, 30, 40), \"MEAN\", dict(axis=3, coeff=2.0)),\n",
    "    ]\n",
    "    for shape, op_name, extra in cases:\n",
    "        data = np.random.rand(*shape).astype(np.float32)\n",
    "        _test_reduction(data, operation=reduction_op[op_name], **extra)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## caffe Embed"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _test_embed(data, **kwargs):\n",
    "    \"\"\"One iteration of Embed\"\"\"\n",
    "    _test_op(data, L.Embed, \"Embed\", **kwargs)\n",
    "\n",
    "\n",
    "def test_forward_Embed():\n",
    "    \"\"\"Embed\"\"\"\n",
    "    k = 20\n",
    "    indices = list(range(k))\n",
    "    np.random.shuffle(indices)\n",
    "    data = np.asarray(indices)\n",
    "    # exercise input ranks 1 through 4, each with and without a bias blob\n",
    "    for shape in ([k], [4, 5], [2, 2, 5], [2, 2, 5, 1]):\n",
    "        data = np.reshape(data, shape)\n",
    "        for with_bias in (True, False):\n",
    "            _test_embed(\n",
    "                data,\n",
    "                num_output=30,\n",
    "                input_dim=k,\n",
    "                bias_term=with_bias,\n",
    "                weight_filler=dict(type=\"xavier\"),\n",
    "                bias_filler=dict(type=\"xavier\"),\n",
    "            )\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "xmenv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
