{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# ONNX 测试样例"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/media/pc/data/lxw/ai/tvm/xinetzone/tvm-book/doc/tutorials/frontend\n"
     ]
    }
   ],
   "source": [
    "%cd ..\n",
    "from utils.onnx_utils import (\n",
    "    get_input_data_shape_dict,\n",
    "    make_constant_node, get_onnxruntime_output,\n",
    "    get_tvm_output, get_tvm_output_with_vm,\n",
    "    verify_with_ort, verify_with_ort_with_inputs,\n",
    "    quantize_and_verify_with_ort\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# import torch\n",
    "# import torchvision\n",
    "# from torch import nn\n",
    "# # from torch.nn import Linear, Module, Sequential"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import onnx\n",
    "from onnx import TensorProto, helper, numpy_helper\n",
    "import tvm"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## `reshape`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build an ONNX graph: a Constant node supplies the target shape for Reshape.\n",
    "in_shape = (4, 3, 3, 4)\n",
    "ref_shape = (6, 2, 4, 3)\n",
    "\n",
    "ref_array = np.array(ref_shape)\n",
    "# Constant node holding the 4-element target shape as an INT32 tensor.\n",
    "ref_node = onnx.helper.make_node(\n",
    "    \"Constant\",\n",
    "    inputs=[],\n",
    "    outputs=[\"ref_in\"],\n",
    "    value=onnx.helper.make_tensor(\n",
    "        name=\"const_tensor\",\n",
    "        data_type=onnx.TensorProto.INT32,\n",
    "        dims=ref_array.shape,\n",
    "        vals=ref_array.flatten().astype(int),\n",
    "    ),\n",
    ")\n",
    "# Reshape consumes the data tensor plus the shape tensor produced above.\n",
    "reshape_node = helper.make_node(\"Reshape\", [\"in\", \"ref_in\"], [\"out\"])\n",
    "\n",
    "graph = helper.make_graph(\n",
    "    [ref_node, reshape_node],\n",
    "    \"reshape_test\",\n",
    "    inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(in_shape))],\n",
    "    outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(ref_shape))],\n",
    ")\n",
    "\n",
    "model = helper.make_model(graph, producer_name=\"reshape_test\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "target = \"llvm\"\n",
    "dev = tvm.cpu()\n",
    "# The graph input \"in\" is declared TensorProto.FLOAT, so feed float32 data\n",
    "# (the previous int32 cast of uniform [0, 1) samples also zeroed every value).\n",
    "x = np.random.uniform(size=in_shape).astype(\"float32\")\n",
    "tvm_out = get_tvm_output(model, x, target, dev, ref_shape, \"float32\")\n",
    "# Only the output shape matters for this test.\n",
    "np.testing.assert_allclose(ref_shape, tvm_out.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## `double_reshape`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "in_shape = (4, 3, 3, 4)\n",
    "ref_shape = (6, 2, 4, 3)\n",
    "\n",
    "ref_array = np.array(ref_shape)\n",
    "ref_node = onnx.helper.make_node(\n",
    "    \"Constant\",\n",
    "    inputs=[],\n",
    "    outputs=[\"ref_in\"],\n",
    "    value=onnx.helper.make_tensor(\n",
    "        name=\"const_tensor\",\n",
    "        data_type=onnx.TensorProto.INT32,\n",
    "        dims=ref_array.shape,\n",
    "        vals=ref_array.flatten().astype(int),\n",
    "    ),\n",
    ")\n",
    "reshape_node1 = helper.make_node(\"Reshape\", [\"in\", \"ref_in\"], [\"out1\"])\n",
    "reshape_node2 = helper.make_node(\"Reshape\", [\"in\", \"ref_in\"], [\"out2\"])\n",
    "add_node = helper.make_node(\"Add\", [\"out1\", \"out2\"], [\"out\"])\n",
    "\n",
    "graph = helper.make_graph(\n",
    "    [ref_node, reshape_node1, reshape_node2, add_node],\n",
    "    \"reshape_test\",\n",
    "    inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(in_shape))],\n",
    "    outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(ref_shape))],\n",
    ")\n",
    "\n",
    "model = helper.make_model(graph, producer_name=\"reshape_test\")\n",
    "\n",
    "# Feed float32 data to match the declared TensorProto.FLOAT graph input\n",
    "# (the previous int32 cast of uniform [0, 1) samples also zeroed every value).\n",
    "x = np.random.uniform(size=in_shape).astype(\"float32\")\n",
    "tvm_out = get_tvm_output(model, x, target, dev, ref_shape, \"float32\")\n",
    "np.testing.assert_allclose(ref_shape, tvm_out.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## `expand`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _test_expand(name, data, shape, ref_data, dtype=\"int32\"):\n",
    "    \"\"\"Build an Expand graph with a constant target `shape` and check the TVM VM output against `ref_data`.\"\"\"\n",
    "    # Map the requested dtype to its ONNX tensor type; this replaces the two\n",
    "    # branches that previously duplicated the Constant-node construction\n",
    "    # (same dtype_map pattern as verify_range below).\n",
    "    dtype_map = {\n",
    "        \"int32\": onnx.TensorProto.INT32,\n",
    "        \"int64\": onnx.TensorProto.INT64,\n",
    "    }\n",
    "    if dtype not in dtype_map:\n",
    "        raise TypeError(\"Invalid dtype\")\n",
    "    shape_array = np.array(shape)\n",
    "    shape_node = onnx.helper.make_node(\n",
    "        \"Constant\",\n",
    "        inputs=[],\n",
    "        outputs=[\"shape\"],\n",
    "        value=onnx.helper.make_tensor(\n",
    "            name=\"const_tensor\",\n",
    "            data_type=dtype_map[dtype],\n",
    "            dims=shape_array.shape,\n",
    "            vals=shape_array.flatten().astype(dtype),\n",
    "        ),\n",
    "    )\n",
    "    expand_node = helper.make_node(\"Expand\", [\"in\", \"shape\"], [\"out\"])\n",
    "\n",
    "    graph = helper.make_graph(\n",
    "        [shape_node, expand_node],\n",
    "        \"expand_test\",\n",
    "        inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(data.shape))],\n",
    "        outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(ref_data.shape))],\n",
    "    )\n",
    "\n",
    "    model = helper.make_model(graph, producer_name=name)\n",
    "\n",
    "    tvm_out = get_tvm_output_with_vm(model, data, target, dev, freeze_params=True)\n",
    "    np.testing.assert_allclose(ref_data, tvm_out)\n",
    "\n",
    "# Broadcast along an existing axis (rank unchanged).\n",
    "in_shape = (3, 1)\n",
    "shape = (3, 4)\n",
    "data = np.random.uniform(size=in_shape).astype(np.float32)\n",
    "ref_data = np.tile(data, 4)\n",
    "_test_expand(\"expand_with_dim_unchanged_test\", data, shape, ref_data, \"int32\")\n",
    "_test_expand(\"expand_with_dim_unchanged_test\", data, shape, ref_data, \"int64\")\n",
    "\n",
    "# Target shape has a larger rank than the input.\n",
    "in_shape = (3, 1)\n",
    "shape = (2, 1, 6)\n",
    "data = np.random.uniform(size=in_shape).astype(np.float32)\n",
    "ref_data = data * np.ones(shape, dtype=np.float32)\n",
    "_test_expand(\"expand_larger_target_shape_test\", data, shape, ref_data, \"int32\")\n",
    "_test_expand(\"expand_larger_target_shape_test\", data, shape, ref_data, \"int64\")\n",
    "\n",
    "# Target shape has a smaller rank than the input.\n",
    "in_shape = (1, 1)\n",
    "shape = (3,)\n",
    "data = np.random.uniform(size=in_shape).astype(np.float32)\n",
    "ref_data = data * np.ones(shape, dtype=np.float32)\n",
    "_test_expand(\"expand_smaller_target_shape_test\", data, shape, ref_data, \"int32\")\n",
    "_test_expand(\"expand_smaller_target_shape_test\", data, shape, ref_data, \"int64\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## `depth_to_space`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "def verify_depth_to_space(inshape, outshape, mode, block_size):\n",
    "    \"\"\"Build a DepthToSpace model and compare TVM with onnxruntime.\n",
    "\n",
    "    NOTE(review): `mode` is accepted but never forwarded to the node --\n",
    "    see the opset comment below.\n",
    "    \"\"\"\n",
    "    node = onnx.helper.make_node(\n",
    "        \"DepthToSpace\", inputs=[\"x\"], outputs=[\"y\"], blocksize=block_size\n",
    "    )\n",
    "\n",
    "    graph = helper.make_graph(\n",
    "        [node],\n",
    "        \"depth_to_space_test\",\n",
    "        inputs=[helper.make_tensor_value_info(\"x\", TensorProto.FLOAT, list(inshape))],\n",
    "        outputs=[helper.make_tensor_value_info(\"y\", TensorProto.FLOAT, list(outshape))],\n",
    "    )\n",
    "\n",
    "    model = helper.make_model(graph, producer_name=\"depth_to_space_test\")\n",
    "\n",
    "    verify_with_ort(model, [inshape], [outshape], target, dev)\n",
    "\n",
    "# current onnx.checker uses the OpSet-1 version of DepthToSpace, which doesn't have a mode argument.\n",
    "# TODO: pass `mode` through to exercise CRD and DCR modes once a newer onnx version is used.\n",
    "verify_depth_to_space((1, 8, 2, 3), (1, 2, 4, 6), mode=\"CRD\", block_size=2)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## `space_to_depth`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "def verify_space_to_depth(inshape, outshape, block_size):\n",
    "    \"\"\"Build a SpaceToDepth model and compare TVM with onnxruntime.\"\"\"\n",
    "    node = onnx.helper.make_node(\n",
    "        \"SpaceToDepth\", inputs=[\"x\"], outputs=[\"y\"], blocksize=block_size\n",
    "    )\n",
    "\n",
    "    graph = helper.make_graph(\n",
    "        [node],\n",
    "        \"space_to_depth_test\",\n",
    "        inputs=[helper.make_tensor_value_info(\"x\", TensorProto.FLOAT, list(inshape))],\n",
    "        outputs=[helper.make_tensor_value_info(\"y\", TensorProto.FLOAT, list(outshape))],\n",
    "    )\n",
    "\n",
    "    model = helper.make_model(graph, producer_name=\"space_to_depth_test\")\n",
    "\n",
    "    verify_with_ort(model, [inshape], [outshape], target, dev)\n",
    "\n",
    "verify_space_to_depth((1, 1, 4, 6), (1, 4, 2, 3), 2)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## `shape`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "in_shape = (4, 3, 3, 4)\n",
    "ref_shape = (6, 2, 4, 3)\n",
    "\n",
    "ref_array = np.array(ref_shape)\n",
    "ref_node = onnx.helper.make_node(\n",
    "    \"Constant\",\n",
    "    inputs=[],\n",
    "    outputs=[\"ref_in\"],\n",
    "    value=onnx.helper.make_tensor(\n",
    "        name=\"const_tensor\",\n",
    "        data_type=onnx.TensorProto.INT32,\n",
    "        dims=ref_array.shape,\n",
    "        vals=ref_array.flatten().astype(int),\n",
    "    ),\n",
    ")\n",
    "reshape_node = helper.make_node(\"Reshape\", [\"in\", \"ref_in\"], [\"out\"])\n",
    "\n",
    "shape_node = helper.make_node(\"Shape\", [\"out\"], [\"final_out\"])\n",
    "\n",
    "graph = helper.make_graph(\n",
    "    [ref_node, reshape_node, shape_node],\n",
    "    \"shape_test\",\n",
    "    inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(in_shape))],\n",
    "    outputs=[helper.make_tensor_value_info(\"final_out\", TensorProto.FLOAT, list(ref_shape))],\n",
    ")\n",
    "\n",
    "model = helper.make_model(graph, producer_name=\"shape_test\")\n",
    "\n",
    "# Feed float32 data to match the declared TensorProto.FLOAT graph input\n",
    "# (the previous int32 cast of uniform [0, 1) samples also zeroed every value).\n",
    "x = np.random.uniform(size=in_shape).astype(\"float32\")\n",
    "tvm_out = get_tvm_output(model, x, target, dev, ref_shape, \"int32\")\n",
    "np.testing.assert_allclose(ref_shape, tvm_out)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## `power`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _test_power_iteration(x_shape, y_shape):\n",
    "    \"\"\"Compare ONNX Pow lowered through TVM against np.power for one shape pair.\"\"\"\n",
    "    # Allow a bare int as shorthand for a 1-D shape.\n",
    "    if isinstance(y_shape, int):\n",
    "        y_shape = [y_shape]\n",
    "\n",
    "    x = np.random.uniform(size=x_shape).astype(np.float32)\n",
    "    y = np.random.uniform(size=y_shape).astype(np.float32)\n",
    "\n",
    "    np_res = np.power(x, y).astype(np.float32)\n",
    "\n",
    "    res = helper.make_node(\"Pow\", [\"x\", \"y\"], [\"out\"])\n",
    "\n",
    "    graph = helper.make_graph(\n",
    "        [res],\n",
    "        \"power_test\",\n",
    "        inputs=[\n",
    "            helper.make_tensor_value_info(\"x\", TensorProto.FLOAT, list(x_shape)),\n",
    "            helper.make_tensor_value_info(\"y\", TensorProto.FLOAT, list(y_shape)),\n",
    "        ],\n",
    "        outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(np_res.shape))],\n",
    "    )\n",
    "\n",
    "    model = helper.make_model(graph, producer_name=\"power_test\")\n",
    "\n",
    "    tvm_out = get_tvm_output(model, [x, y], target, dev, np_res.shape)\n",
    "    # Use np.testing here: only `import tvm` appears in this notebook, and the\n",
    "    # `tvm.testing` submodule is not guaranteed to be loaded by it.\n",
    "    np.testing.assert_allclose(np_res, tvm_out, rtol=1e-5, atol=1e-5)\n",
    "\n",
    "_test_power_iteration((1, 3), (1))  # note: (1) is the scalar 1, handled by the int branch above\n",
    "_test_power_iteration((2, 3), (2, 3))\n",
    "_test_power_iteration((2, 3), (1, 3))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## `range`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "def verify_range(start, limit, delta, dtype):\n",
    "    \"\"\"Build a Range model over scalar start/limit/delta of `dtype` and compare TVM with onnxruntime.\"\"\"\n",
    "    dtype_map = {\n",
    "        \"float32\": TensorProto.FLOAT,\n",
    "        \"int32\": TensorProto.INT32,\n",
    "        \"int64\": TensorProto.INT64,\n",
    "    }\n",
    "    dtype_onnx = dtype_map[dtype]\n",
    "    y = helper.make_node(\"Range\", [\"start\", \"limit\", \"delta\"], [\"output\"])\n",
    "    graph = helper.make_graph(\n",
    "        [y],\n",
    "        \"range_test\",\n",
    "        inputs=[\n",
    "            helper.make_tensor_value_info(\"start\", dtype_onnx, []),\n",
    "            helper.make_tensor_value_info(\"limit\", dtype_onnx, []),\n",
    "            helper.make_tensor_value_info(\"delta\", dtype_onnx, []),\n",
    "        ],\n",
    "        outputs=[\n",
    "            # Declared output length is derived the same way np.arange computes it.\n",
    "            helper.make_tensor_value_info(\n",
    "                \"output\", dtype_onnx, np.arange(start, limit, delta).shape\n",
    "            )\n",
    "        ],\n",
    "    )\n",
    "    model = helper.make_model(graph, producer_name=\"range_test\")\n",
    "    inputs = [np.array(x).astype(dtype) for x in [start, limit, delta]]\n",
    "    verify_with_ort_with_inputs(model, inputs, target=target, dev=dev, use_vm=True)\n",
    "\n",
    "for t in [\"float32\", \"int32\", \"int64\"]:\n",
    "    verify_range(0, 10, 1, t)\n",
    "    verify_range(2, 8, 2, t)\n",
    "    verify_range(-3, 6, 4, t)\n",
    "    verify_range(-2, -7, -1, t)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## `squeeze`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "def test_squeeze_once(in_shape, out_shape, axes=None):\n",
    "    \"\"\"Squeeze `in_shape` to `out_shape` (axes=None squeezes every size-1 dim) and compare with onnxruntime.\"\"\"\n",
    "    y = helper.make_node(\"Squeeze\", [\"in\"], [\"out\"], axes=axes)\n",
    "\n",
    "    graph = helper.make_graph(\n",
    "        [y],\n",
    "        \"squeeze_test\",\n",
    "        inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(in_shape))],\n",
    "        outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(out_shape))],\n",
    "    )\n",
    "\n",
    "    model = helper.make_model(graph, producer_name=\"squeeze_test\")\n",
    "    x = np.random.uniform(size=in_shape).astype(\"float32\")\n",
    "    verify_with_ort_with_inputs(model, [x], [out_shape], target=target, dev=dev, opset=11)\n",
    "\n",
    "test_squeeze_once((1, 3, 1, 3, 1, 1), (3, 3), [0, 2, 4, 5])\n",
    "test_squeeze_once((1, 3, 1, 3, 1, 1), (3, 3))  # empty axis.\n",
    "test_squeeze_once((), ())  # scalar testing."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## `flatten`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "def verify_flatten(in_shape, axis, ref_shape):\n",
    "    \"\"\"Flatten `in_shape` at `axis` into a 2-D tensor of `ref_shape` and compare with onnxruntime.\"\"\"\n",
    "    flatten_node = helper.make_node(\"Flatten\", [\"in\"], [\"out\"], axis=axis)\n",
    "\n",
    "    graph = helper.make_graph(\n",
    "        [flatten_node],\n",
    "        \"flatten_test\",\n",
    "        inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(in_shape))],\n",
    "        outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(ref_shape))],\n",
    "    )\n",
    "\n",
    "    model = helper.make_model(graph, producer_name=\"flatten_test\")\n",
    "    verify_with_ort(model, [in_shape], target=target, dev=dev)\n",
    "\n",
    "verify_flatten((1, 3, 4, 4), 1, (1, 48))\n",
    "verify_flatten((1,), 1, (1, 1))\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## `unsqueeze`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Unsqueeze (3, 3) at axes (0, 3, 4) -> (1, 3, 3, 1, 1).\n",
    "# At opset 11 the axes are passed as a node attribute (compare the opset >= 13 variant in the next section).\n",
    "in_shape = (3, 3)\n",
    "axis = (0, 3, 4)\n",
    "out_shape = (1, 3, 3, 1, 1)\n",
    "y = helper.make_node(\"Unsqueeze\", [\"in\"], [\"out\"], axes=list(axis))\n",
    "\n",
    "graph = helper.make_graph(\n",
    "    [y],\n",
    "    \"squeeze_test\",\n",
    "    inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(in_shape))],\n",
    "    outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(out_shape))],\n",
    ")\n",
    "\n",
    "model = helper.make_model(graph, producer_name=\"squeeze_test\")\n",
    "verify_with_ort(model, [in_shape], target=target, dev=dev, opset=11)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## `unsqueeze_with_neg_axes`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "def verify_unsqueeze_with_neg_axes(opset=11):\n",
    "    \"\"\"Unsqueeze with negative axes: opset < 13 passes `axes` as an attribute, opset >= 13 as a constant input.\"\"\"\n",
    "    in_shape = (2, 3, 4)\n",
    "    axis = (-2, -1)\n",
    "    out_shape = (2, 3, 4, 1, 1)\n",
    "    if opset < 13:\n",
    "        y = helper.make_node(\"Unsqueeze\", [\"in\"], [\"out\"], axes=list(axis))\n",
    "        nodes = [y]\n",
    "    else:\n",
    "        axes = np.array(list(axis)).astype(np.int64)\n",
    "        # Opset >= 13 moved `axes` from an attribute to a tensor input.\n",
    "        axes = helper.make_node(\n",
    "            \"Constant\",\n",
    "            inputs=[],\n",
    "            outputs=[\"axes\"],\n",
    "            value=onnx.helper.make_tensor(\n",
    "                name=\"const_axes\",\n",
    "                data_type=onnx.TensorProto.INT64,\n",
    "                dims=axes.shape,\n",
    "                vals=axes.flatten().astype(int),\n",
    "            ),\n",
    "        )\n",
    "        y = helper.make_node(\"Unsqueeze\", [\"in\", \"axes\"], [\"out\"])\n",
    "        nodes = [axes, y]\n",
    "\n",
    "    graph = helper.make_graph(\n",
    "        nodes,\n",
    "        \"squeeze_test\",\n",
    "        inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(in_shape))],\n",
    "        outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(out_shape))],\n",
    "    )\n",
    "\n",
    "    model = helper.make_model(graph, producer_name=\"squeeze_test\")\n",
    "    verify_with_ort(model, [in_shape], target=target, dev=dev, opset=opset)\n",
    "\n",
    "verify_unsqueeze_with_neg_axes()\n",
    "verify_unsqueeze_with_neg_axes(opset=13)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## `gather`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [],
   "source": [
    "def verify_gather(in_shape, indices, axis, dtype):\n",
    "    \"\"\"Build a Gather model (np.take fixes the reference output shape) and compare TVM with onnxruntime.\"\"\"\n",
    "    x = np.random.uniform(size=in_shape).astype(dtype)\n",
    "    indices = np.array(indices, dtype=\"int64\")\n",
    "    out_np = np.take(x, indices, axis=axis)\n",
    "\n",
    "    y = helper.make_node(\"Gather\", [\"in\", \"indices\"], [\"out\"], axis=axis)\n",
    "\n",
    "    graph = helper.make_graph(\n",
    "        [y],\n",
    "        \"gather_test\",\n",
    "        inputs=[\n",
    "            helper.make_tensor_value_info(\n",
    "                \"in\", helper.np_dtype_to_tensor_dtype(np.dtype(dtype)), list(in_shape)\n",
    "            ),\n",
    "            helper.make_tensor_value_info(\"indices\", TensorProto.INT64, list(indices.shape)),\n",
    "        ],\n",
    "        outputs=[\n",
    "            helper.make_tensor_value_info(\n",
    "                \"out\", helper.np_dtype_to_tensor_dtype(np.dtype(dtype)), list(out_np.shape)\n",
    "            )\n",
    "        ],\n",
    "    )\n",
    "    model = helper.make_model(graph, producer_name=\"gather_test\")\n",
    "    verify_with_ort_with_inputs(model, [x, indices], target=target, dev=dev, dtype=dtype)\n",
    "\n",
    "verify_gather((4,), [1], 0, \"int32\")\n",
    "verify_gather((1, 4), [0], 0, \"int32\")\n",
    "verify_gather((4,), [[[1, 0], [0, 1]]], 0, \"float32\")\n",
    "verify_gather((2, 2), [[[1, 0], [0, 1]]], 1, \"int32\")\n",
    "verify_gather((3, 3, 3), [[[1, 0]]], -1, \"int32\")\n",
    "verify_gather((4, 3, 5, 6), [[2, 1, 0, 0]], 0, \"float32\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## `dynamic_gather`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [],
   "source": [
    "from tvm import relay\n",
    "# Gather with dynamic (\"?\") input dimensions, run through the Relay VM executor.\n",
    "dtype = \"float32\"\n",
    "in_shape = [2, 2]\n",
    "indices = 1\n",
    "axis = 1\n",
    "x = np.random.uniform(size=in_shape).astype(dtype)\n",
    "indices = np.array(indices, dtype=\"int64\")\n",
    "out_np = np.take(x, indices, axis=axis)\n",
    "\n",
    "# Constant scalar index node; rebinds the name `indices` (previously the numpy scalar above).\n",
    "indices = helper.make_node(\n",
    "    \"Constant\",\n",
    "    inputs=[],\n",
    "    outputs=[\"indices\"],\n",
    "    value=onnx.helper.make_tensor(\n",
    "        name=\"const_indices\",\n",
    "        data_type=onnx.TensorProto.INT64,\n",
    "        dims=[],\n",
    "        vals=[1],\n",
    "    ),\n",
    ")\n",
    "y = helper.make_node(\"Gather\", [\"in\", \"indices\"], [\"out\"], axis=axis)\n",
    "\n",
    "graph = helper.make_graph(\n",
    "    [indices, y],\n",
    "    \"gather_test\",\n",
    "    inputs=[\n",
    "        helper.make_tensor_value_info(\n",
    "            \"in\", helper.np_dtype_to_tensor_dtype(np.dtype(dtype)), [\"?\", \"?\"]\n",
    "        ),\n",
    "    ],\n",
    "    outputs=[\n",
    "        helper.make_tensor_value_info(\n",
    "            \"out\", helper.np_dtype_to_tensor_dtype(np.dtype(dtype)), [\"?\"] * len(out_np.shape)\n",
    "        )\n",
    "    ],\n",
    ")\n",
    "model = helper.make_model(graph, producer_name=\"dynamic_gather_test\")\n",
    "\n",
    "mod, params = relay.frontend.from_onnx(model)\n",
    "\n",
    "result = relay.create_executor(\"vm\", mod=mod, device=dev, target=target).evaluate()(x, **params)\n",
    "np.testing.assert_allclose(out_np, result.numpy(), rtol=1e-5, atol=1e-5)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## `gatherelements`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [],
   "source": [
    "def verify_gatherelements(in_shape, indices, axis):\n",
    "    \"\"\"Build a GatherElements model and compare TVM with onnxruntime.\"\"\"\n",
    "    x = np.random.uniform(size=in_shape).astype(\"float32\")\n",
    "    indices = np.array(indices, dtype=\"int32\")\n",
    "\n",
    "    y = helper.make_node(\"GatherElements\", [\"data\", \"indices\"], [\"output\"], axis=axis)\n",
    "    graph = helper.make_graph(\n",
    "        [y],\n",
    "        \"gather_elements_test\",\n",
    "        inputs=[\n",
    "            helper.make_tensor_value_info(\"data\", TensorProto.FLOAT, list(in_shape)),\n",
    "            helper.make_tensor_value_info(\"indices\", TensorProto.INT32, list(indices.shape)),\n",
    "        ],\n",
    "        outputs=[helper.make_tensor_value_info(\"output\", TensorProto.FLOAT, list(in_shape))],\n",
    "    )\n",
    "    model = helper.make_model(graph, producer_name=\"gather_elements_test\")\n",
    "\n",
    "    verify_with_ort_with_inputs(model, [x, indices], target=target, dev=dev)\n",
    "\n",
    "verify_gatherelements((4,), [3, 0, 2, 1], 0)\n",
    "verify_gatherelements((2, 2), [[1, 0], [0, 1]], 0)\n",
    "verify_gatherelements((2, 2), [[0, 0], [1, 0]], 1)\n",
    "verify_gatherelements((2, 2), [[1, 0], [0, 1]], 1)\n",
    "\n",
    "indices = [\n",
    "    [[1, 0, 0], [1, 0, 1], [0, 1, 1]],\n",
    "    [[1, 1, 1], [1, 2, 1], [1, 0, 1]],\n",
    "    [[1, 2, 1], [1, 2, 1], [1, 2, 1]],\n",
    "]\n",
    "\n",
    "verify_gatherelements((3, 3, 3), indices, 2)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## `scatter`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
    "def verify_scatter(in_shape, indices, axis):\n",
    "    \"\"\"Build a model with the deprecated Scatter op (opset 9) and compare TVM with onnxruntime.\"\"\"\n",
    "    x = np.random.uniform(size=in_shape).astype(\"float32\")\n",
    "    indices = np.array(indices, dtype=\"int32\")\n",
    "    updates = np.random.uniform(size=indices.shape).astype(\"float32\")\n",
    "\n",
    "    y = helper.make_node(\"Scatter\", [\"data\", \"indices\", \"updates\"], [\"output\"], axis=axis)\n",
    "\n",
    "    graph = helper.make_graph(\n",
    "        [y],\n",
    "        \"scatter_test\",\n",
    "        inputs=[\n",
    "            helper.make_tensor_value_info(\"data\", TensorProto.FLOAT, list(in_shape)),\n",
    "            helper.make_tensor_value_info(\"indices\", TensorProto.INT32, list(indices.shape)),\n",
    "            helper.make_tensor_value_info(\"updates\", TensorProto.FLOAT, list(indices.shape)),\n",
    "        ],\n",
    "        outputs=[helper.make_tensor_value_info(\"output\", TensorProto.FLOAT, list(in_shape))],\n",
    "    )\n",
    "    model = helper.make_model(graph, producer_name=\"scatter_test\")\n",
    "    # Scatter operator has been supported from version 9 and\n",
    "    # deprecated since version 11 of the default ONNX operator set\n",
    "    verify_with_ort_with_inputs(model, [x, indices, updates], target=target, dev=dev, opset=9)\n",
    "\n",
    "verify_scatter((4,), [1], 0)\n",
    "verify_scatter((1, 4), [[0]], 0)\n",
    "verify_scatter((4,), [2, 3], 0)\n",
    "verify_scatter((2, 2), [[1, 0], [0, 1]], 1)\n",
    "verify_scatter((3, 3, 3), [[[-1, -3]]], -1)\n",
    "verify_scatter((4, 3, 5, 6), [[[[2, 1, 0, 0]]]], 0)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## `scatter_elements`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [],
   "source": [
    "def verify_scatter_elements(in_shape, indices, axis=0, reduction=\"update\"):\n",
    "    \"\"\"Build a ScatterElements model (optionally with a `reduction`) and compare TVM with onnxruntime.\"\"\"\n",
    "    x = np.random.uniform(size=in_shape).astype(\"float32\")\n",
    "    indices = np.array(indices, dtype=\"int32\")\n",
    "    updates = np.random.uniform(size=indices.shape).astype(\"float32\")\n",
    "\n",
    "    scatter_elements_node = helper.make_node(\n",
    "        \"ScatterElements\",\n",
    "        [\"data\", \"indices\", \"updates\"],\n",
    "        [\"output\"],\n",
    "        axis=axis,\n",
    "        reduction=reduction,\n",
    "    )\n",
    "\n",
    "    graph = helper.make_graph(\n",
    "        [scatter_elements_node],\n",
    "        \"scatter_elements_test\",\n",
    "        inputs=[\n",
    "            helper.make_tensor_value_info(\"data\", TensorProto.FLOAT, list(in_shape)),\n",
    "            helper.make_tensor_value_info(\"indices\", TensorProto.INT32, list(indices.shape)),\n",
    "            helper.make_tensor_value_info(\"updates\", TensorProto.FLOAT, list(indices.shape)),\n",
    "        ],\n",
    "        outputs=[helper.make_tensor_value_info(\"output\", TensorProto.FLOAT, list(in_shape))],\n",
    "    )\n",
    "    model = helper.make_model(graph, producer_name=\"scatter_elements_test\")\n",
    "    verify_with_ort_with_inputs(model, [x, indices, updates], target=target, dev=dev)\n",
    "\n",
    "# Usual scatter for 1d input\n",
    "verify_scatter_elements((4,), [2, 3])\n",
    "# Usual scatter with specified positive axis\n",
    "verify_scatter_elements((2, 2), [[1, 0], [0, 1]], 1)\n",
    "# Usual scatter for 3d input with specified negative indices and axis\n",
    "verify_scatter_elements((3, 3, 3), [[[-1, -3]]], -1)\n",
    "# Usual scatter for 4d input\n",
    "verify_scatter_elements((4, 3, 5, 6), [[[[2, 1, 0, 0]]]])\n",
    "# Scatter elements with addition reduction of duplicates\n",
    "verify_scatter_elements(\n",
    "    (3, 3, 3),\n",
    "    [[[0, 2, 1], [1, 1, 1], [2, 1, 0]], [[0, 2, 1], [1, 1, 1], [2, 1, 0]]],\n",
    "    0,\n",
    "    \"add\",\n",
    ")\n",
    "# Scatter elements with reduction and specified axis\n",
    "verify_scatter_elements((3, 3, 3), [[[2, 2, 2], [1, 1, 1], [0, 0, 0]]], 2, \"add\")\n",
    "# Scatter elements with multiplication reduction of duplicates\n",
    "verify_scatter_elements(\n",
    "    (3, 3, 3),\n",
    "    [[[0, 2, 1], [1, 1, 1], [2, 1, 0]], [[0, 2, 1], [1, 1, 1], [2, 1, 0]]],\n",
    "    0,\n",
    "    \"mul\",\n",
    ")\n",
    "# TODO(vvchernov): min and max options are supported from 18 version, but CI supports 17 only\n",
    "# # Scatter elements with min reduction of duplicates\n",
    "# verify_scatter_elements(\n",
    "#     (3, 3, 3),\n",
    "#     [[[0, 2, 1], [1, 1, 1], [2, 1, 0]], [[0, 2, 1], [1, 1, 1], [2, 1, 0]]],\n",
    "#     0,\n",
    "#     \"min\",\n",
    "# )\n",
    "# # Scatter elements with max reduction of duplicates\n",
    "# verify_scatter_elements(\n",
    "#     (3, 3, 3),\n",
    "#     [[[0, 2, 1], [1, 1, 1], [2, 1, 0]], [[0, 2, 1], [1, 1, 1], [2, 1, 0]]],\n",
    "#     0,\n",
    "#     \"max\",\n",
    "# )\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## `slice`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2023-07-01 10:55:12.497323434 [W:onnxruntime:, model.cc:183 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 1 model may run depending upon legacy support of some older opset version operators.\n",
      "2023-07-01 10:55:12.497650501 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset: 1\n",
      "2023-07-01 10:55:12.626164881 [W:onnxruntime:, model.cc:183 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 1 model may run depending upon legacy support of some older opset version operators.\n",
      "2023-07-01 10:55:12.626452322 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset: 1\n",
      "2023-07-01 10:55:12.824904112 [W:onnxruntime:, model.cc:183 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 1 model may run depending upon legacy support of some older opset version operators.\n",
      "2023-07-01 10:55:12.825170974 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset: 1\n",
      "2023-07-01 10:55:13.001490506 [W:onnxruntime:, model.cc:183 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 1 model may run depending upon legacy support of some older opset version operators.\n",
      "2023-07-01 10:55:13.001881928 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset: 1\n",
      "2023-07-01 10:55:13.182445702 [W:onnxruntime:, model.cc:183 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 1 model may run depending upon legacy support of some older opset version operators.\n",
      "2023-07-01 10:55:13.182824473 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset: 1\n",
      "2023-07-01 10:55:13.360154885 [W:onnxruntime:, graph.cc:1283 Graph] Initializer starts appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:13.360182594 [W:onnxruntime:, graph.cc:1283 Graph] Initializer ends appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:13.360192159 [W:onnxruntime:, graph.cc:1283 Graph] Initializer axes appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:13.828243801 [W:onnxruntime:, graph.cc:1283 Graph] Initializer starts appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:13.828265080 [W:onnxruntime:, graph.cc:1283 Graph] Initializer ends appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:13.828271177 [W:onnxruntime:, graph.cc:1283 Graph] Initializer axes appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:14.256960147 [W:onnxruntime:, graph.cc:1283 Graph] Initializer starts appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:14.256981384 [W:onnxruntime:, graph.cc:1283 Graph] Initializer ends appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:14.384909082 [W:onnxruntime:, graph.cc:1283 Graph] Initializer starts appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:14.384928057 [W:onnxruntime:, graph.cc:1283 Graph] Initializer ends appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:14.384933749 [W:onnxruntime:, graph.cc:1283 Graph] Initializer axes appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:14.786366893 [W:onnxruntime:, graph.cc:1283 Graph] Initializer starts appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:14.786389572 [W:onnxruntime:, graph.cc:1283 Graph] Initializer ends appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:14.786394986 [W:onnxruntime:, graph.cc:1283 Graph] Initializer axes appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:15.208355786 [W:onnxruntime:, graph.cc:1283 Graph] Initializer starts appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:15.208392578 [W:onnxruntime:, graph.cc:1283 Graph] Initializer ends appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:15.208404798 [W:onnxruntime:, graph.cc:1283 Graph] Initializer axes appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:15.209389984 [W:onnxruntime:, execution_frame.cc:857 VerifyOutputSizes] Expected shape from model of {20,9,5} does not match actual shape of {20,10,4} for output out\n",
      "2023-07-01 10:55:15.624473823 [W:onnxruntime:, graph.cc:1283 Graph] Initializer starts appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:15.624496258 [W:onnxruntime:, graph.cc:1283 Graph] Initializer ends appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:15.624502027 [W:onnxruntime:, graph.cc:1283 Graph] Initializer axes appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:16.221291288 [W:onnxruntime:, graph.cc:1283 Graph] Initializer starts appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:16.221313960 [W:onnxruntime:, graph.cc:1283 Graph] Initializer ends appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:16.543084490 [W:onnxruntime:, graph.cc:1283 Graph] Initializer starts appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:16.543106694 [W:onnxruntime:, graph.cc:1283 Graph] Initializer ends appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:16.543112250 [W:onnxruntime:, graph.cc:1283 Graph] Initializer axes appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:17.133941641 [W:onnxruntime:, graph.cc:1283 Graph] Initializer starts appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:17.133962514 [W:onnxruntime:, graph.cc:1283 Graph] Initializer ends appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:17.133968120 [W:onnxruntime:, graph.cc:1283 Graph] Initializer axes appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:17.923953436 [W:onnxruntime:, graph.cc:1283 Graph] Initializer starts appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:17.923979762 [W:onnxruntime:, graph.cc:1283 Graph] Initializer ends appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:17.923988019 [W:onnxruntime:, graph.cc:1283 Graph] Initializer axes appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:18.707745956 [W:onnxruntime:, graph.cc:1283 Graph] Initializer starts appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:18.707767855 [W:onnxruntime:, graph.cc:1283 Graph] Initializer ends appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:19.035065115 [W:onnxruntime:, graph.cc:1283 Graph] Initializer starts appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:19.035086247 [W:onnxruntime:, graph.cc:1283 Graph] Initializer ends appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:19.035091746 [W:onnxruntime:, graph.cc:1283 Graph] Initializer axes appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:19.963091586 [W:onnxruntime:, graph.cc:1283 Graph] Initializer starts appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:19.963115689 [W:onnxruntime:, graph.cc:1283 Graph] Initializer ends appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:19.963120895 [W:onnxruntime:, graph.cc:1283 Graph] Initializer axes appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:20.449319403 [W:onnxruntime:, graph.cc:1283 Graph] Initializer starts appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:20.449344019 [W:onnxruntime:, graph.cc:1283 Graph] Initializer ends appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:20.449349980 [W:onnxruntime:, graph.cc:1283 Graph] Initializer axes appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:20.449354871 [W:onnxruntime:, graph.cc:1283 Graph] Initializer steps appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:20.847609594 [W:onnxruntime:, graph.cc:1283 Graph] Initializer starts appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:20.847662829 [W:onnxruntime:, graph.cc:1283 Graph] Initializer ends appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:20.847685836 [W:onnxruntime:, graph.cc:1283 Graph] Initializer axes appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2023-07-01 10:55:20.847705841 [W:onnxruntime:, graph.cc:1283 Graph] Initializer steps appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n"
     ]
    }
   ],
   "source": [
    "def _test_slice_iteration_v1(indata, outdata, starts, ends, axes=None):\n",
    "    \"\"\"Build an opset-1 Slice model (attribute-based form) and compare\n",
    "    the TVM result against ONNX Runtime.\n",
    "\n",
    "    In opset 1, starts/ends/axes are node *attributes*; ``axes`` is left\n",
    "    off the node entirely when it is falsy.\n",
    "    \"\"\"\n",
    "    # make_node sorts attribute kwargs, so building them in a dict first\n",
    "    # yields the same proto as passing them inline.\n",
    "    node_attrs = {\"starts\": starts, \"ends\": ends}\n",
    "    if axes:\n",
    "        node_attrs[\"axes\"] = axes\n",
    "    slice_node = helper.make_node(\"Slice\", [\"in\"], [\"out\"], **node_attrs)\n",
    "\n",
    "    graph = helper.make_graph(\n",
    "        [slice_node],\n",
    "        \"slice_test\",\n",
    "        inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(indata.shape))],\n",
    "        outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(outdata.shape))],\n",
    "    )\n",
    "    model = helper.make_model(graph, producer_name=\"slice_test\")\n",
    "    # NOTE(review): ``target`` and ``dev`` are read from the notebook's\n",
    "    # global scope -- presumably set in an earlier cell; confirm before a\n",
    "    # fresh Restart-and-Run-All.\n",
    "    verify_with_ort_with_inputs(\n",
    "        model, [indata], [outdata.shape], opset=1, target=target, dev=dev\n",
    "    )\n",
    "\n",
    "def _test_slice_iteration_v10(indata, outdata, **attrs):\n",
    "    \"\"\"Build an opset-10 Slice model (input-based form) and compare the\n",
    "    TVM result against ONNX Runtime.\n",
    "\n",
    "    ``starts``/``ends`` are required keyword attrs; ``axes``/``steps`` are\n",
    "    optional.  Any attr named in ``attrs[\"add_noop_to_input_attrs\"]`` is\n",
    "    routed through a no-op Reshape/Reshape round trip before it reaches\n",
    "    the Slice node, exercising the importer on non-constant slice inputs.\n",
    "    \"\"\"\n",
    "    starts = attrs[\"starts\"]\n",
    "    ends = attrs[\"ends\"]\n",
    "    axes = None if \"axes\" not in attrs else attrs[\"axes\"]\n",
    "    steps = None if \"steps\" not in attrs else attrs[\"steps\"]\n",
    "    starts = np.asarray(starts)\n",
    "    ends = np.asarray(ends)\n",
    "    inputs = [\n",
    "        helper.make_tensor_value_info(\"data\", TensorProto.FLOAT, list(indata.shape)),\n",
    "        helper.make_tensor_value_info(\"starts\", TensorProto.INT64, list(starts.shape)),\n",
    "        helper.make_tensor_value_info(\"ends\", TensorProto.INT64, list(ends.shape)),\n",
    "    ]\n",
    "    initializer = [\n",
    "        helper.make_tensor(\"starts\", TensorProto.INT64, list(starts.shape), starts),\n",
    "        helper.make_tensor(\"ends\", TensorProto.INT64, list(ends.shape), ends),\n",
    "    ]\n",
    "    nodes = []\n",
    "\n",
    "    if \"add_noop_to_input_attrs\" in attrs:\n",
    "\n",
    "        def add_noop_to_input_attr(attr_name, attr):\n",
    "            \"\"\"Reshape ``attr`` to (1, *shape) and back; return the nodes.\"\"\"\n",
    "            output_name = attr_name + \"_output\"\n",
    "\n",
    "            # Target shape with a leading unit axis: (1, *attr.shape).\n",
    "            ref_shape = list(np.array(attr).shape)\n",
    "            ref_shape.insert(0, 1)\n",
    "            ref_shape = tuple(ref_shape)\n",
    "            ref_array = np.array(ref_shape)\n",
    "            ref_node = onnx.helper.make_node(\n",
    "                \"Constant\",\n",
    "                inputs=[],\n",
    "                outputs=[\"ref_in_\" + attr_name],\n",
    "                value=onnx.helper.make_tensor(\n",
    "                    name=\"const_tensor__1_\" + attr_name,\n",
    "                    data_type=onnx.TensorProto.INT64,\n",
    "                    dims=ref_array.shape,\n",
    "                    vals=ref_array.flatten().astype(int),\n",
    "                ),\n",
    "            )\n",
    "            # Original shape, used to undo the expansion above.\n",
    "            in_shape = np.array(attr).shape\n",
    "            in_array = np.array(in_shape)\n",
    "            ref_node2 = onnx.helper.make_node(\n",
    "                \"Constant\",\n",
    "                inputs=[],\n",
    "                outputs=[\"input_shape_\" + attr_name],\n",
    "                value=onnx.helper.make_tensor(\n",
    "                    name=\"const_tensor__2_\" + attr_name,\n",
    "                    data_type=onnx.TensorProto.INT64,\n",
    "                    dims=in_array.shape,\n",
    "                    vals=in_array.flatten().astype(int),\n",
    "                ),\n",
    "            )\n",
    "\n",
    "            reshape1_node = helper.make_node(\n",
    "                \"Reshape\", [attr_name, \"ref_in_\" + attr_name], [\"reshape_\" + attr_name]\n",
    "            )\n",
    "            reshape2_node = helper.make_node(\n",
    "                \"Reshape\", [\"reshape_\" + attr_name, \"input_shape_\" + attr_name], [output_name]\n",
    "            )\n",
    "            return [ref_node, ref_node2, reshape1_node, reshape2_node]\n",
    "\n",
    "    # Wire each present attr into the Slice node, optionally through the\n",
    "    # no-op Reshape round trip defined above.\n",
    "    slice_inputs = []\n",
    "    for attr_name in [\"starts\", \"ends\", \"axes\", \"steps\"]:\n",
    "        if attr_name not in attrs:\n",
    "            continue\n",
    "        if \"add_noop_to_input_attrs\" in attrs and attr_name in attrs[\"add_noop_to_input_attrs\"]:\n",
    "            nodes.extend(add_noop_to_input_attr(attr_name, attrs[attr_name]))\n",
    "            slice_inputs.append(attr_name + \"_output\")\n",
    "        else:\n",
    "            slice_inputs.append(attr_name)\n",
    "\n",
    "    if axes:\n",
    "        axes = np.asarray(axes)\n",
    "        inputs.append(\n",
    "            helper.make_tensor_value_info(\"axes\", TensorProto.INT64, list(axes.shape))\n",
    "        )\n",
    "        initializer.append(\n",
    "            helper.make_tensor(\"axes\", TensorProto.INT64, list(axes.shape), axes)\n",
    "        )\n",
    "\n",
    "    if steps:\n",
    "        # One step per sliced axis, so ``axes`` must be present and aligned.\n",
    "        assert axes is not None and len(axes) == len(steps)\n",
    "        steps = np.asarray(steps)\n",
    "        inputs.append(\n",
    "            # FIX: declare the value_info with steps' own shape (previously\n",
    "            # ``axes.shape``, which only worked because of the assert above).\n",
    "            helper.make_tensor_value_info(\"steps\", TensorProto.INT64, list(steps.shape))\n",
    "        )\n",
    "        initializer.append(\n",
    "            helper.make_tensor(\"steps\", TensorProto.INT64, list(steps.shape), steps)\n",
    "        )\n",
    "\n",
    "    y = helper.make_node(\"Slice\", [\"data\", *slice_inputs], [\"out\"])\n",
    "\n",
    "    nodes.append(y)\n",
    "    graph = helper.make_graph(\n",
    "        nodes,\n",
    "        \"slice_test\",\n",
    "        inputs=inputs,\n",
    "        outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(outdata.shape))],\n",
    "        initializer=initializer,\n",
    "    )\n",
    "    model = helper.make_model(graph, producer_name=\"slice_test\")\n",
    "    # NOTE(review): ``target`` and ``dev`` come from the notebook's global\n",
    "    # scope -- presumably set in an earlier cell; confirm before re-running.\n",
    "    verify_with_ort_with_inputs(\n",
    "        model, [indata], opset=10, freeze_params=True, use_vm=True, target=target, dev=dev\n",
    "    )\n",
    "\n",
    "# NOTE(review): RNG is unseeded, so values differ across runs; the checks\n",
    "# compare TVM vs ORT on the same data, so results are still deterministic\n",
    "# in pass/fail terms.\n",
    "x = np.random.randn(20, 10, 5).astype(np.float32)\n",
    "# Opset-1 (attribute-based) Slice cases.\n",
    "_test_slice_iteration_v1(x, x[0:3, 0:10], starts=(0, 0), ends=(3, 10), axes=(0, 1))\n",
    "_test_slice_iteration_v1(x, x[0:3, 0:10], starts=(0, 0), ends=(10, 3), axes=(1, 0))\n",
    "_test_slice_iteration_v1(x, x[:, :, 3:4], starts=(0, 0, 3), ends=(20, 10, 4))\n",
    "_test_slice_iteration_v1(x, x[:, 1:1000], starts=(1,), ends=(1000,), axes=(1,))\n",
    "_test_slice_iteration_v1(x, x[:, 0:-1], starts=(0,), ends=(-1,), axes=(1,))\n",
    "# Opset-10 (input-based) Slice cases.\n",
    "_test_slice_iteration_v10(x, x[0:3, 0:10], starts=(0, 0), ends=(3, 10), axes=(0, 1))\n",
    "_test_slice_iteration_v10(x, x[0:3, 0:10], starts=(0, 0), ends=(10, 3), axes=(1, 0))\n",
    "_test_slice_iteration_v10(x, x[:, :, 3:4], starts=(0, 0, 3), ends=(20, 10, 4))\n",
    "_test_slice_iteration_v10(x, x[:, 1:1000], starts=(1,), ends=(1000,), axes=(1,))\n",
    "_test_slice_iteration_v10(x, x[:, 0:-1], starts=(0,), ends=(-1,), axes=(1,))\n",
    "# FIX: axes=(-1,) slices the *last* axis, so the reference output is\n",
    "# x[:, :, 0:-1]; the old expectation x[:, 0:-1] declared shape (20, 9, 5)\n",
    "# while the model actually produces (20, 10, 4) -- see ORT's\n",
    "# VerifyOutputSizes warning in the logged output above.\n",
    "_test_slice_iteration_v10(x, x[:, :, 0:-1], starts=(0,), ends=(-1,), axes=(-1,))\n",
    "# Cases feeding starts/ends/axes through the no-op Reshape round trip.\n",
    "_test_slice_iteration_v10(\n",
    "    x,\n",
    "    x[0:3, 0:10],\n",
    "    starts=(0, 0),\n",
    "    ends=(3, 10),\n",
    "    axes=(0, 1),\n",
    "    add_noop_to_input_attrs=[\"starts\"],\n",
    ")\n",
    "_test_slice_iteration_v10(\n",
    "    x, x[:, :, 3:4], starts=(0, 0, 3), ends=(20, 10, 4), add_noop_to_input_attrs=[\"ends\"]\n",
    ")\n",
    "_test_slice_iteration_v10(\n",
    "    x, x[:, 1:1000], starts=(1,), ends=(1000,), axes=(1,), add_noop_to_input_attrs=[\"axes\"]\n",
    ")\n",
    "_test_slice_iteration_v10(\n",
    "    x,\n",
    "    x[:, 0:-1],\n",
    "    starts=(0,),\n",
    "    ends=(-1,),\n",
    "    axes=(1,),\n",
    "    add_noop_to_input_attrs=[\"starts\", \"ends\"],\n",
    ")\n",
    "_test_slice_iteration_v10(\n",
    "    x,\n",
    "    x[0:3, 0:10],\n",
    "    starts=(0, 0),\n",
    "    ends=(3, 10),\n",
    "    axes=(0, 1),\n",
    "    add_noop_to_input_attrs=[\"ends\", \"axes\"],\n",
    ")\n",
    "_test_slice_iteration_v10(\n",
    "    x,\n",
    "    x[:, :, 3:4],\n",
    "    starts=(0, 0, 3),\n",
    "    ends=(20, 10, 4),\n",
    "    add_noop_to_input_attrs=[\"starts\", \"axes\"],\n",
    ")\n",
    "_test_slice_iteration_v10(\n",
    "    x,\n",
    "    x[:, 1:1000],\n",
    "    starts=(1,),\n",
    "    ends=(1000,),\n",
    "    axes=(1,),\n",
    "    add_noop_to_input_attrs=[\"starts\", \"ends\", \"axes\"],\n",
    ")\n",
    "# INT64_MAX ends == slice to the end of the axis.\n",
    "x = np.random.randn(1, 1, 1, 128).astype(np.float32)\n",
    "_test_slice_iteration_v10(\n",
    "    x, x, starts=(0, 0), ends=(9223372036854775807, 9223372036854775807), axes=(0, 3)\n",
    ")\n",
    "\n",
    "# Strided slicing via the opset-10 ``steps`` input.\n",
    "x = np.random.randn(4, 4).astype(np.float32)\n",
    "_test_slice_iteration_v10(\n",
    "    x, x[:, 1::2], starts=(1,), ends=(9223372036854775807,), axes=(1,), steps=(2,)\n",
    ")\n",
    "_test_slice_iteration_v10(\n",
    "    x,\n",
    "    x[0::1, 1::2],\n",
    "    starts=(0, 1),\n",
    "    ends=(4, 4),\n",
    "    axes=(0, 1),\n",
    "    steps=(1, 2),\n",
    ")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## `onnx_op_elementwise`"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "tvmx",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.9"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
