{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 翻译 tensorflow 代码"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import set_env"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import os\n",
    "import warnings\n",
    "os.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\n",
    "import tensorflow as tf\n",
    "tf.get_logger().setLevel(\"ERROR\")\n",
    "warnings.simplefilter(\"ignore\")\n",
    "from tensorflow.python.framework import constant_op\n",
    "from tensorflow.python.ops import nn_ops\n",
    "from tensorflow.python.ops import array_ops\n",
    "from tensorflow.python.ops import math_ops\n",
    "from tensorflow.python.ops import variables\n",
    "\n",
    "try:\n",
    "    import tensorflow.compat.v1 as tf\n",
    "\n",
    "    tf.disable_v2_behavior()\n",
    "except ImportError:\n",
    "    import tensorflow as tf\n",
    "\n",
    "import tvm\n",
    "import tvm.testing\n",
    "import tvm.relay.testing.tf as tf_testing\n",
    "from tvm.contrib.msc.framework.tensorflow.frontend import translate\n",
    "from tvm.contrib.msc.framework.tensorflow import codegen"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 只允许 TF 使用一半的 GPU 内存，另一半留给 TVM\n",
    "gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)\n",
    "gpu_sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n",
    "gpu_sess.close()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "常用函数："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def convert_to_list(x):\n",
    "    if not isinstance(x, list):\n",
    "        x = [x]\n",
    "    return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "def run_tf_graph(sess, input_data, input_node, output_node):\n",
    "    \"\"\"Generic function to execute tensorflow\"\"\"\n",
    "\n",
    "    input_data = convert_to_list(input_data)\n",
    "    input_node = convert_to_list(input_node)\n",
    "    output_node = convert_to_list(output_node)\n",
    "\n",
    "    tensor = [sess.graph.get_tensor_by_name(output_name) for output_name in output_node]\n",
    "\n",
    "    input_dict = {e: input_data[i] for i, e in enumerate(input_node)}\n",
    "    if len(input_node) == 1 and input_node[0] == \"\":\n",
    "        output_data = sess.run(tensor)\n",
    "    else:\n",
    "        output_data = sess.run(tensor, input_dict)\n",
    "    return output_data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_graph_def(in_data, in_name, out_name):\n",
    "    \"\"\"Get tf.GraphDef for translate\"\"\"\n",
    "\n",
    "    def name_without_num(name):\n",
    "        return name.split(\":\")[0] if \":\" in name else name\n",
    "\n",
    "    out_name = convert_to_list(out_name)\n",
    "    out_node = [name_without_num(name) for name in out_name]\n",
    "    in_data = convert_to_list(in_data)\n",
    "    in_name = convert_to_list(in_name)\n",
    "\n",
    "    with tf.Session() as sess:\n",
    "        sess.run(variables.global_variables_initializer())\n",
    "        final_graph_def = tf_testing.AddShapesToGraphDef(sess, out_node)\n",
    "        golden = run_tf_graph(sess, in_data, in_name, out_name)\n",
    "    return final_graph_def, golden"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "def verify_model(graph_def, golden, in_data, in_name, out_name, use_out_name=True):\n",
    "    \"\"\"Generic function to generate and compare tensorflow and MSC-TFV1 output\"\"\"\n",
    "\n",
    "    out_name = convert_to_list(out_name)\n",
    "    in_data = convert_to_list(in_data)\n",
    "    in_name = convert_to_list(in_name)\n",
    "    shape_dict = {i: d.shape for i, d in zip(in_name, in_data)}\n",
    "    graph, weights = translate.from_tensorflow(graph_def, shape_dict, out_name)\n",
    "    with tf.Graph().as_default():\n",
    "        outputs = codegen.to_tensorflow(graph, weights)\n",
    "        with tf.Session() as sess:\n",
    "            sess.run(variables.global_variables_initializer())\n",
    "            if not use_out_name:\n",
    "                out_name = [o.name for o in convert_to_list(outputs)]\n",
    "            result = run_tf_graph(sess, in_data, in_name, out_name)\n",
    "\n",
    "    golden = convert_to_list(golden)\n",
    "    result = convert_to_list(result)\n",
    "    assert len(golden) == len(result), \"golden {} mismatch with result {}\".format(\n",
    "        len(golden), len(result)\n",
    "    )\n",
    "    for gol_r, new_r in zip(golden, result):\n",
    "        if isinstance(gol_r, np.ndarray):\n",
    "            np.testing.assert_allclose(gol_r, new_r, atol=1e-5, rtol=1e-5)\n",
    "        else:\n",
    "            assert gol_r == new_r"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 测试 tensorflow1 转译器对池化的支持情况"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _test_pooling(input_shape, **kwargs):\n",
    "    \"\"\"One iteration of pool operation with given shapes and attributes\"\"\"\n",
    "\n",
    "    x = -np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1\n",
    "\n",
    "    with tf.Graph().as_default():\n",
    "        in_data = array_ops.placeholder(shape=input_shape, dtype=\"float32\")\n",
    "        nn_ops.pool(in_data, **kwargs)\n",
    "        out_name = \"max_pool:0\" if kwargs[\"pooling_type\"] == \"MAX\" else \"avg_pool:0\"\n",
    "        io_info = {\"in_data\": x, \"in_name\": \"Placeholder:0\", \"out_name\": out_name}\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "for pool_type in [\"AVG\", \"MAX\"]:\n",
    "    _test_pooling(\n",
    "        input_shape=[2, 9, 10, 2],\n",
    "        window_shape=[2, 1],\n",
    "        padding=\"SAME\",\n",
    "        pooling_type=pool_type,\n",
    "        dilation_rate=[1, 1],\n",
    "        strides=[1, 1],\n",
    "    )\n",
    "\n",
    "    _test_pooling(\n",
    "        input_shape=[2, 9, 10, 2],\n",
    "        window_shape=[2, 1],\n",
    "        padding=\"VALID\",\n",
    "        pooling_type=pool_type,\n",
    "        dilation_rate=[1, 1],\n",
    "        strides=[1, 1],\n",
    "    )\n",
    "\n",
    "    _test_pooling(\n",
    "        input_shape=[1, 2, 1],\n",
    "        window_shape=[1],\n",
    "        padding=\"VALID\",\n",
    "        pooling_type=pool_type,\n",
    "        dilation_rate=[1],\n",
    "    )\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "显式填充（`tf.VERSION >= 2.4.1`）："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "_test_pooling(\n",
    "    input_shape=[2, 9, 10, 2],\n",
    "    window_shape=[4, 4],\n",
    "    padding=[[0, 0], [0, 1], [2, 3], [0, 0]],\n",
    "    pooling_type=\"MAX\",\n",
    "    dilation_rate=[1, 1],\n",
    "    strides=[1, 1],\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 测试 tensorflow1 转译器对卷积的支持情况"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _test_convolution(\n",
    "    opname,\n",
    "    tensor_in_sizes,\n",
    "    filter_in_sizes,\n",
    "    dilations,\n",
    "    strides,\n",
    "    padding,\n",
    "    data_format,\n",
    "):\n",
    "    \"\"\"One iteration of convolution with given shapes and attributes\"\"\"\n",
    "    total_size_1 = np.prod(tensor_in_sizes)\n",
    "    total_size_2 = np.prod(filter_in_sizes)\n",
    "    # Initializes the input tensor with array containing incrementing\n",
    "    # numbers from 1.\n",
    "    data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]\n",
    "    filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]\n",
    "\n",
    "    with tf.Graph().as_default():\n",
    "        in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype=\"float32\")\n",
    "        in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype=\"float32\")\n",
    "        if data_format == \"NHWC\":\n",
    "            strides = [1] + strides + [1]\n",
    "            dilations = [1] + dilations + [1]\n",
    "        else:\n",
    "            strides = [1, 1] + strides\n",
    "            dilations = [1, 1] + dilations\n",
    "\n",
    "        if opname == \"conv\":\n",
    "            nn_ops.conv2d(\n",
    "                in_data,\n",
    "                in_filter,\n",
    "                strides=strides,\n",
    "                dilations=dilations,\n",
    "                padding=padding,\n",
    "                data_format=data_format,\n",
    "            )\n",
    "            io_info = {\n",
    "                \"in_data\": np.reshape(data_array, tensor_in_sizes).astype(\"float32\"),\n",
    "                \"in_name\": \"Placeholder:0\",\n",
    "                \"out_name\": \"Conv2D:0\",\n",
    "            }\n",
    "            graph_def, golden = get_graph_def(**io_info)\n",
    "        else:\n",
    "            nn_ops.depthwise_conv2d_native(\n",
    "                in_data,\n",
    "                in_filter,\n",
    "                strides=strides,\n",
    "                dilations=dilations,\n",
    "                padding=padding,\n",
    "                data_format=data_format,\n",
    "            )\n",
    "            io_info = {\n",
    "                \"in_data\": np.reshape(data_array, tensor_in_sizes).astype(\"float32\"),\n",
    "                \"in_name\": \"Placeholder:0\",\n",
    "                \"out_name\": \"DepthwiseConv2dNative:0\",\n",
    "            }\n",
    "            graph_def, golden = get_graph_def(**io_info)\n",
    "        verify_model(graph_def, golden, **io_info)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "_test_convolution(\"conv\", [4, 8, 8, 176], [1, 1, 176, 32], [1, 1], [1, 1], \"SAME\", \"NHWC\")\n",
    "_test_convolution(\"conv\", [4, 17, 17, 19], [3, 3, 19, 19], [1, 1], [2, 2], \"VALID\", \"NHWC\")\n",
    "_test_convolution(\"depthwise\", [4, 8, 8, 176], [1, 1, 176, 1], [1, 1], [1, 1], \"SAME\", \"NHWC\")\n",
    "_test_convolution(\"depthwise\", [4, 17, 17, 19], [3, 3, 19, 1], [1, 1], [2, 2], \"VALID\", \"NHWC\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "显式填充（`tf.VERSION >= 2.4.1`）："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from packaging import version as package_version\n",
    "\n",
    "if package_version.parse(tf.VERSION) >= package_version.parse(\"2.4.1\"):\n",
    "    _test_convolution(\n",
    "        \"conv\",\n",
    "        [4, 8, 8, 16],\n",
    "        [1, 1, 16, 32],\n",
    "        [1, 1],\n",
    "        [1, 1],\n",
    "        [[0, 0], [2, 3], [0, 1], [0, 0]],\n",
    "        \"NHWC\",\n",
    "    )\n",
    "    _test_convolution(\n",
    "        \"depthwise\",\n",
    "        [4, 8, 8, 16],\n",
    "        [1, 1, 16, 1],\n",
    "        [1, 1],\n",
    "        [1, 1],\n",
    "        [[0, 0], [2, 3], [0, 1], [0, 0]],\n",
    "        \"NHWC\",\n",
    "    )"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 测试 tensorflow1 转译器对 biasadd 的支持情况"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _test_biasadd(tensor_in_sizes, data_format):\n",
    "    \"\"\"One iteration of biasadd with given shapes and attributes\"\"\"\n",
    "\n",
    "    total_size_1 = 1\n",
    "    for s in tensor_in_sizes:\n",
    "        total_size_1 *= s\n",
    "    tensor_bias_sizes = [tensor_in_sizes[1]] if data_format == \"NCHW\" else [tensor_in_sizes[3]]\n",
    "    total_size_2 = tensor_bias_sizes[0]\n",
    "    # Initializes the input tensor with array containing incrementing\n",
    "    # numbers from 1.\n",
    "    data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]\n",
    "    bias_array = [f * 1.0 for f in range(1, total_size_2 + 1)]\n",
    "\n",
    "    with tf.Graph().as_default():\n",
    "        in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype=\"float32\")\n",
    "        in_bias = constant_op.constant(bias_array, shape=tensor_bias_sizes, dtype=\"float32\")\n",
    "        nn_ops.bias_add(in_data, in_bias, data_format=data_format)\n",
    "        io_info = {\n",
    "            \"in_data\": np.reshape(data_array, tensor_in_sizes).astype(\"float32\"),\n",
    "            \"in_name\": \"Placeholder:0\",\n",
    "            \"out_name\": \"BiasAdd:0\",\n",
    "        }\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "_test_biasadd([4, 8, 8, 176], \"NHWC\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 其他测试"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _test_where_with_broadcast(in_shape, cond_shape):\n",
    "    choice_list = list(np.arange(10).astype(\"float32\"))\n",
    "    t_1 = np.random.choice(choice_list, size=cond_shape)\n",
    "    t_2 = np.random.choice(choice_list, size=cond_shape)\n",
    "    x = np.random.choice(choice_list, size=in_shape)\n",
    "    y = np.random.choice(choice_list, size=in_shape)\n",
    "\n",
    "    with tf.Graph().as_default():\n",
    "        in1 = tf.placeholder(shape=cond_shape, dtype=\"float32\", name=\"in1\")\n",
    "        in2 = tf.placeholder(shape=cond_shape, dtype=\"float32\", name=\"in2\")\n",
    "        condition = math_ops.less(in1, in2, name=\"less\")\n",
    "        lhs = tf.placeholder(shape=in_shape, dtype=\"float32\", name=\"x\")\n",
    "        rhs = tf.placeholder(shape=in_shape, dtype=\"float32\", name=\"y\")\n",
    "        out = tf.where(condition, lhs, rhs)\n",
    "        io_info = {\n",
    "            \"in_data\": [t_1, t_2, x, y],\n",
    "            \"in_name\": [\"in1:0\", \"in2:0\", \"x:0\", \"y:0\"],\n",
    "            \"out_name\": out.name,\n",
    "        }\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_where_with_broadcast():\n",
    "    \"\"\"test tensorflow translator for where\"\"\"\n",
    "\n",
    "    _test_where_with_broadcast((5, 2), (5,))\n",
    "    _test_where_with_broadcast((3, 2, 5), (3,))\n",
    "\n",
    "\n",
    "def _test_reshape(data, out_shape):\n",
    "    \"\"\"One iteration of reshape operation with given data and out shape\"\"\"\n",
    "\n",
    "    with tf.Graph().as_default():\n",
    "        in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)\n",
    "        array_ops.reshape(in_data, out_shape)\n",
    "        io_info = {\"in_data\": data, \"in_name\": \"Placeholder:0\", \"out_name\": \"Reshape:0\"}\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def _test_reshape_with_call():\n",
    "    \"\"\"relay.expr.Call as shape\"\"\"\n",
    "    data = np.zeros((6, 4, 2))\n",
    "    with tf.Graph().as_default():\n",
    "        in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)\n",
    "        out_shape = tf.constant([1, 2, 3], dtype=\"int32\")\n",
    "        out_shape = tf.multiply(out_shape, 2)\n",
    "        array_ops.reshape(in_data, out_shape)\n",
    "        io_info = {\"in_data\": data, \"in_name\": \"Placeholder:0\", \"out_name\": \"Reshape:0\"}\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def _test_reshape_like(data, shape_like):\n",
    "    \"\"\"A special case for reshape.\"\"\"\n",
    "\n",
    "    with tf.Graph().as_default():\n",
    "        in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)\n",
    "        in_shape_like = array_ops.placeholder(shape=shape_like.shape, dtype=data.dtype)\n",
    "        out_shape = array_ops.shape(in_shape_like)\n",
    "        array_ops.reshape(in_data, out_shape)\n",
    "        io_info = {\"in_data\": data, \"in_name\": \"Placeholder:0\", \"out_name\": \"Reshape:0\"}\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_reshape():\n",
    "    \"\"\"test tensorflow translator for reshape\"\"\"\n",
    "\n",
    "    _test_reshape(np.arange(6.0), [2, 3])\n",
    "    _test_reshape(np.arange(6), [-1, 2])\n",
    "    _test_reshape_with_call()\n",
    "    _test_reshape_like(np.zeros((3, 6)), np.zeros((9, 2)))\n",
    "\n",
    "\n",
    "def _test_sigmoid(data):\n",
    "    \"\"\"One iteration of sigmoid\"\"\"\n",
    "\n",
    "    with tf.Graph().as_default():\n",
    "        in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)\n",
    "        _ = math_ops.sigmoid(in_data)\n",
    "        io_info = {\"in_data\": data, \"in_name\": \"Placeholder:0\", \"out_name\": \"Sigmoid:0\"}\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_sigmoid():\n",
    "    \"\"\"test tensorflow translator for concat\"\"\"\n",
    "\n",
    "    _test_sigmoid(np.random.uniform(size=(3, 4, 4, 3)).astype(\"float32\"))\n",
    "\n",
    "\n",
    "def _test_argx(func, data, **kwargs):\n",
    "\n",
    "    with tf.Graph().as_default():\n",
    "        inp = array_ops.placeholder(shape=data.shape, dtype=data.dtype, name=\"c0\")\n",
    "        func(inp, name=\"argx\", **kwargs)\n",
    "        io_info = {\"in_data\": data, \"in_name\": \"c0:0\", \"out_name\": \"argx:0\"}\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_argx():\n",
    "    \"\"\"test tensorflow translator for argmax/argmin\"\"\"\n",
    "\n",
    "    data = np.random.uniform(size=(8, 4, 9)).astype(\"float32\")\n",
    "    for output_type in [tf.int64, tf.int32]:\n",
    "        _test_argx(tf.argmax, data=data, axis=1, output_type=output_type)\n",
    "        _test_argx(tf.argmin, data=data, axis=1, output_type=output_type)\n",
    "\n",
    "\n",
    "def _test_matmul(i, j, k, transpose_a=False, transpose_b=False):\n",
    "    \"\"\"One iteration of matmul\"\"\"\n",
    "\n",
    "    a_shape_init = [i, j]\n",
    "    b_shape_init = [j, k]\n",
    "    a_shape = [] + (a_shape_init[::-1] if transpose_a else a_shape_init)\n",
    "    b_shape = [] + (b_shape_init[::-1] if transpose_b else b_shape_init)\n",
    "\n",
    "    with tf.Graph().as_default():\n",
    "        a_in = tf.placeholder(shape=a_shape, dtype=\"float32\", name=\"A\")\n",
    "        b_in = tf.placeholder(shape=b_shape, dtype=\"float32\", name=\"B\")\n",
    "        result = tf.matmul(a_in, b_in, transpose_a=transpose_a, transpose_b=transpose_b)\n",
    "\n",
    "        a_np = np.random.uniform(high=5.0, size=a_shape).astype(\"float32\")\n",
    "        b_np = np.random.uniform(high=5.0, size=b_shape).astype(\"float32\")\n",
    "        io_info = {\n",
    "            \"in_data\": [a_np, b_np],\n",
    "            \"in_name\": [a_in.name, b_in.name],\n",
    "            \"out_name\": result.name,\n",
    "        }\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info, use_out_name=False)\n",
    "\n",
    "\n",
    "def test_matmul():\n",
    "    \"\"\"test tensorflow translator for matmul\"\"\"\n",
    "\n",
    "    _test_matmul(1, 3, 6)\n",
    "    _test_matmul(1, 3, 6, True, True)\n",
    "    _test_matmul(1, 3, 6, True, False)\n",
    "    _test_matmul(1, 3, 6, False, True)\n",
    "\n",
    "\n",
    "def _test_batch_matmul(a_shape, b_shape, adjoint_a=False, adjoint_b=False):\n",
    "\n",
    "    with tf.Graph().as_default():\n",
    "        a_in = tf.placeholder(shape=a_shape, dtype=\"float32\", name=\"A\")\n",
    "        b_in = tf.placeholder(shape=b_shape, dtype=\"float32\", name=\"B\")\n",
    "        result = tf.matmul(a_in, b_in, adjoint_a=adjoint_a, adjoint_b=adjoint_b, name=\"batchmatmul\")\n",
    "\n",
    "        a_np = np.random.uniform(high=5.0, size=a_shape).astype(\"float32\")\n",
    "        b_np = np.random.uniform(high=5.0, size=b_shape).astype(\"float32\")\n",
    "        io_info = {\n",
    "            \"in_data\": [a_np, b_np],\n",
    "            \"in_name\": [a_in.name, b_in.name],\n",
    "            \"out_name\": result.name,\n",
    "        }\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_batch_matmul():\n",
    "    \"\"\"test tensorflow translator for batch_matmul\"\"\"\n",
    "\n",
    "    _test_batch_matmul((3, 5, 4), (3, 4, 5))\n",
    "    _test_batch_matmul((3, 5, 4), (3, 4, 5), True, True)\n",
    "    _test_batch_matmul((3, 5, 4), (3, 5, 4), True, False)\n",
    "    _test_batch_matmul((3, 5, 4), (3, 5, 4), False, True)\n",
    "\n",
    "\n",
    "def _test_stridedslice(\n",
    "    ip_shape,\n",
    "    begin,\n",
    "    end,\n",
    "    stride,\n",
    "    begin_mask=0,\n",
    "    end_mask=0,\n",
    "    new_axis_mask=0,\n",
    "    shrink_axis_mask=0,\n",
    "    ellipsis_mask=0,\n",
    "):\n",
    "    \"\"\"One iteration of a Stridedslice\"\"\"\n",
    "\n",
    "    tf.reset_default_graph()\n",
    "    np_data = np.random.uniform(size=ip_shape).astype(\"float32\")\n",
    "    with tf.Graph().as_default():\n",
    "        in_data = tf.placeholder(\"float32\", ip_shape, name=\"in_data\")\n",
    "        tf.strided_slice(\n",
    "            in_data,\n",
    "            begin,\n",
    "            end,\n",
    "            stride,\n",
    "            begin_mask=begin_mask,\n",
    "            end_mask=end_mask,\n",
    "            new_axis_mask=new_axis_mask,\n",
    "            shrink_axis_mask=shrink_axis_mask,\n",
    "            ellipsis_mask=ellipsis_mask,\n",
    "            name=\"strided_slice\",\n",
    "        )\n",
    "        io_info = {\"in_data\": np_data, \"in_name\": \"in_data:0\", \"out_name\": \"strided_slice:0\"}\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "@pytest.mark.xfail(reason=\"MSC does not support Tuple of PrimValue\")\n",
    "def test_stridedslice():\n",
    "    \"\"\"test tensorflow translator for stridedslice\"\"\"\n",
    "\n",
    "    _test_stridedslice([2, 3, 4], [0], [1], [1], shrink_axis_mask=8)\n",
    "    _test_stridedslice([3, 4, 3], [1, -1, 0], [4, -5, 3], [2, -1, 1])\n",
    "    _test_stridedslice([3, 4, 3], [1, 0], [4, 3], [2, 1], ellipsis_mask=8)\n",
    "    _test_stridedslice([3, 4, 3], [1, 1, 0], [4, 4, 2], [2, 1, 1], new_axis_mask=5)\n",
    "    _test_stridedslice(\n",
    "        [3, 4, 5, 4, 5, 6],\n",
    "        [0, 0, 1, 2, 1],\n",
    "        [2, 3, 4, 5, 3],\n",
    "        [1, 1, 2, 2, 1],\n",
    "        shrink_axis_mask=5,\n",
    "        new_axis_mask=1,\n",
    "        ellipsis_mask=2,\n",
    "        begin_mask=8,\n",
    "        end_mask=8,\n",
    "    )\n",
    "\n",
    "\n",
    "def _test_divide(ip_shape, dtype):\n",
    "    np_numer = np.random.uniform(-100, 100, size=ip_shape).astype(dtype)\n",
    "    np_denomin = np.random.uniform(1, 100, size=ip_shape).astype(dtype)\n",
    "    tf.reset_default_graph()\n",
    "    with tf.Graph().as_default():\n",
    "        numerator = tf.placeholder(dtype, ip_shape, name=\"numer\")\n",
    "        denominator = tf.placeholder(dtype, ip_shape, name=\"denomin\")\n",
    "        tf.math.divide(numerator, denominator, name=\"RealDiv\")\n",
    "        io_info = {\n",
    "            \"in_data\": [np_numer, np_denomin],\n",
    "            \"in_name\": [\"numer:0\", \"denomin:0\"],\n",
    "            \"out_name\": \"RealDiv:0\",\n",
    "        }\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def _test_floordiv(ip_shape, dtype):\n",
    "    np_numer = np.random.uniform(1, 100, size=ip_shape).astype(dtype)\n",
    "    tf.reset_default_graph()\n",
    "    with tf.Graph().as_default():\n",
    "        numerator = tf.placeholder(dtype, ip_shape, name=\"numer\")\n",
    "        tf.math.floordiv(numerator, tf.constant(5, dtype=dtype), name=\"FloorDiv\")\n",
    "        io_info = {\"in_data\": [np_numer], \"in_name\": [\"numer:0\"], \"out_name\": \"FloorDiv:0\"}\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_divide():\n",
    "    \"\"\"test tensorflow translator for div\"\"\"\n",
    "\n",
    "    _test_divide((4, 3, 7), \"float32\")\n",
    "    _test_divide((4, 3, 7), \"int32\")\n",
    "    _test_floordiv((4, 3, 7), \"float32\")\n",
    "    _test_floordiv((4, 3, 7), \"int32\")\n",
    "\n",
    "\n",
    "def _test_gather(ip_shape, indice_shape, indice_value, axis, batch_dims):\n",
    "    \"\"\"One iteration of a GatherV2\"\"\"\n",
    "\n",
    "    tf.reset_default_graph()\n",
    "    with tf.Graph().as_default():\n",
    "        in_data = tf.placeholder(\"float32\", ip_shape, name=\"in_data\")\n",
    "        indices = tf.placeholder(\"int32\", indice_shape, name=\"indices\")\n",
    "        out = tf.gather(in_data, indices, axis=axis, batch_dims=batch_dims)\n",
    "        np_data = np.random.uniform(1, 10, size=ip_shape).astype(\"float32\")\n",
    "\n",
    "        def _fill_indices(indice_value):\n",
    "            indices = np.array(ip_shape, dtype=\"float32\")\n",
    "            if isinstance(indice_value, int):\n",
    "                indices = np.array([indice_value], dtype=\"int32\")\n",
    "            else:\n",
    "                indices = np.asarray(indice_value, dtype=\"int32\")\n",
    "            return indices\n",
    "\n",
    "        np_indices = _fill_indices(indice_value)\n",
    "        io_info = {\n",
    "            \"in_data\": [np_data, np_indices],\n",
    "            \"in_name\": [\"in_data:0\", \"indices:0\"],\n",
    "            \"out_name\": out.name,\n",
    "        }\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_gather():\n",
    "    \"\"\"test tensorflow translator for gather\"\"\"\n",
    "\n",
    "    _test_gather((4,), (1,), 1, 0, 0)\n",
    "    _test_gather((2, 2), (1, 2, 2), [[[1, 0], [0, 1]]], 0, 0)\n",
    "    _test_gather((4, 3, 5, 6), (1, 4), [[2, 1, 0, 0]], 0, 0)\n",
    "\n",
    "\n",
    "def _test_split(in_shape, axis, num_or_size_splits):\n",
    "    \"\"\"One iteration of a Split\"\"\"\n",
    "    np_data = np.random.uniform(-5, 5, size=in_shape).astype(\"float32\")\n",
    "\n",
    "    tf.reset_default_graph()\n",
    "    with tf.Graph().as_default():\n",
    "        in_data = tf.placeholder(\"float32\", in_shape, name=\"in_data\")\n",
    "        _ = len(num_or_size_splits) if isinstance(num_or_size_splits, list) else num_or_size_splits\n",
    "        split = tf.split(in_data, num_or_size_splits, axis=axis)\n",
    "        relu = [tf.nn.relu(i) for i in split]\n",
    "        io_info = {\n",
    "            \"in_data\": [np_data],\n",
    "            \"in_name\": [\"in_data:0\"],\n",
    "            \"out_name\": [n.name for n in relu],\n",
    "        }\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "    # and now test together with concat\n",
    "    tf.reset_default_graph()\n",
    "    with tf.Graph().as_default():\n",
    "        in_data = tf.placeholder(\"float32\", in_shape, name=\"in_data\")\n",
    "        splitted = tf.split(in_data, num_or_size_splits, axis=axis)\n",
    "        concat = tf.concat(splitted, axis)\n",
    "        io_info = {\n",
    "            \"in_data\": [np_data],\n",
    "            \"in_name\": [\"in_data:0\"],\n",
    "            \"out_name\": concat.name,\n",
    "        }\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_split():\n",
    "    \"\"\"test tensorflow translator for split\"\"\"\n",
    "\n",
    "    _test_split((6, 1, 3, 5), 0, 3)\n",
    "    _test_split((6, 1, 3, 5), -4, 3)\n",
    "    _test_split((3, 6, 4), -2, [1, 4, 1])\n",
    "\n",
    "\n",
    "def _test_tile(in_shape, multiples):\n",
    "    np_data = np.random.uniform(-5, 5, size=in_shape).astype(\"float32\")\n",
    "    tf.reset_default_graph()\n",
    "    with tf.Graph().as_default():\n",
    "        in_data = tf.placeholder(\"float32\", in_shape, name=\"in_data\")\n",
    "        tf.tile(in_data, multiples=multiples, name=\"tile\")\n",
    "        io_info = {\"in_data\": np_data, \"in_name\": \"in_data:0\", \"out_name\": \"tile:0\"}\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_tile():\n",
    "    \"\"\"test tensorflow translator for tile\"\"\"\n",
    "\n",
    "    _test_tile((2, 2), (2, 3))\n",
    "\n",
    "\n",
    "def _test_clip_by_value(ip_shape, clip_value_min, clip_value_max):\n",
    "    tf.reset_default_graph()\n",
    "    with tf.Graph().as_default():\n",
    "        in_data = tf.placeholder(\"float32\", ip_shape, name=\"in_data\")\n",
    "        tf.clip_by_value(in_data, clip_value_min, clip_value_max, name=\"ClipByValue\")\n",
    "        np_data = np.random.uniform(-100, 100, size=ip_shape).astype(\"float32\")\n",
    "        io_info = {\"in_data\": np_data, \"in_name\": \"in_data:0\", \"out_name\": \"ClipByValue:0\"}\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_clip_by_value():\n",
    "    \"\"\"test tensorflow translator for clip\"\"\"\n",
    "\n",
    "    _test_clip_by_value((4,), 0.1, 5.0)\n",
    "\n",
    "\n",
    "def test_multi_input():\n",
    "    \"\"\"test tensorflow translator for multi input\"\"\"\n",
    "\n",
    "    with tf.Graph().as_default():\n",
    "        in1 = tf.placeholder(tf.int32, shape=[3, 3], name=\"in1\")\n",
    "        in2 = tf.placeholder(tf.int32, shape=[3, 3], name=\"in2\")\n",
    "        in3 = tf.placeholder(tf.int32, shape=[3, 3], name=\"in3\")\n",
    "        in4 = tf.placeholder(tf.int32, shape=[3, 3], name=\"in4\")\n",
    "\n",
    "        # out = (in1 + in2) * (in3 - in4): four graph inputs feeding one output\n",
    "        out1 = tf.add(in1, in2, name=\"out1\")\n",
    "        out2 = tf.subtract(in3, in4, name=\"out2\")\n",
    "        _ = tf.multiply(out1, out2, name=\"out\")\n",
    "        # the same array is fed to all four placeholders\n",
    "        in_data = np.arange(9, dtype=\"int32\").reshape([3, 3])\n",
    "        io_info = {\n",
    "            \"in_data\": [in_data, in_data, in_data, in_data],\n",
    "            \"in_name\": [\"in1:0\", \"in2:0\", \"in3:0\", \"in4:0\"],\n",
    "            \"out_name\": \"out:0\",\n",
    "        }\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_multi_output():\n",
    "    \"\"\"test tensorflow translator for multi output\"\"\"\n",
    "\n",
    "    with tf.Graph().as_default():\n",
    "        in1 = tf.placeholder(tf.int32, shape=[3, 3], name=\"in1\")\n",
    "        in2 = tf.placeholder(tf.int32, shape=[3, 3], name=\"in2\")\n",
    "        in3 = tf.placeholder(tf.int32, shape=[3, 3], name=\"in3\")\n",
    "        in4 = tf.placeholder(tf.int32, shape=[3, 3], name=\"in4\")\n",
    "\n",
    "        # two disjoint outputs: out1 = in1 + in2 and out2 = in3 - in4;\n",
    "        # out_name is a list so both graph outputs are verified\n",
    "        _ = tf.add(in1, in2, name=\"out1\")\n",
    "        _ = tf.subtract(in3, in4, name=\"out2\")\n",
    "        in_data = np.arange(9, dtype=\"int32\").reshape([3, 3])\n",
    "        io_info = {\n",
    "            \"in_data\": [in_data] * 4,\n",
    "            \"in_name\": [\"in1:0\", \"in2:0\", \"in3:0\", \"in4:0\"],\n",
    "            \"out_name\": [\"out1:0\", \"out2:0\"],\n",
    "        }\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def _test_resize_bilinear(in_shape, to_shape, align_corners):\n",
    "    \"\"\"One iteration of resize bilinear\"\"\"\n",
    "\n",
    "    data = np.random.uniform(size=in_shape).astype(\"float32\")\n",
    "    shape_data = np.array(to_shape).astype(\"int32\")\n",
    "\n",
    "    with tf.Graph().as_default():\n",
    "        in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)\n",
    "        # to_shape enters the graph as a constant, not as a runtime input\n",
    "        shape_data = constant_op.constant(\n",
    "            shape_data, shape=shape_data.shape, dtype=shape_data.dtype\n",
    "        )\n",
    "        tf.image.resize_bilinear(in_data, shape_data, align_corners=align_corners)\n",
    "        # no name= given anywhere, so \"Placeholder:0\"/\"ResizeBilinear:0\" are the\n",
    "        # default op names TF assigns\n",
    "        io_info = {\"in_data\": data, \"in_name\": \"Placeholder:0\", \"out_name\": \"ResizeBilinear:0\"}\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def _test_resize_nearest_neighbor(in_shape, to_shape):\n",
    "    \"\"\"One iteration of resize nearest neighbor\"\"\"\n",
    "\n",
    "    data = np.random.uniform(size=in_shape).astype(\"float32\")\n",
    "    shape_data = np.array(to_shape).astype(\"int32\")\n",
    "\n",
    "    with tf.Graph().as_default():\n",
    "        in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)\n",
    "        # to_shape enters the graph as a constant, not as a runtime input\n",
    "        shape_data = constant_op.constant(\n",
    "            shape_data, shape=shape_data.shape, dtype=shape_data.dtype\n",
    "        )\n",
    "        tf.image.resize_nearest_neighbor(in_data, shape_data, name=\"resize_nearest_neighbor\")\n",
    "        # the output op is named explicitly; the input keeps TF's default name\n",
    "        io_info = {\n",
    "            \"in_data\": data,\n",
    "            \"in_name\": \"Placeholder:0\",\n",
    "            \"out_name\": \"resize_nearest_neighbor:0\",\n",
    "        }\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_resize():\n",
    "    \"\"\"Exercise the tensorflow translator on resize ops.\"\"\"\n",
    "\n",
    "    bilinear_cases = [\n",
    "        ((4, 32, 32, 3), [50, 50], False),\n",
    "        ((6, 32, 32, 3), [20, 20], True),\n",
    "    ]\n",
    "    for in_shape, to_shape, align in bilinear_cases:\n",
    "        _test_resize_bilinear(in_shape, to_shape, align)\n",
    "    _test_resize_nearest_neighbor((6, 32, 32, 3), [20, 20])\n",
    "\n",
    "\n",
    "def _test_broadcast_to(in_shape, to_shape):\n",
    "    \"\"\"One iteration of broadcast_to\"\"\"\n",
    "\n",
    "    data = np.random.uniform(size=in_shape).astype(\"float32\")\n",
    "    shape_data = np.array(to_shape).astype(\"int32\")\n",
    "\n",
    "    with tf.Graph().as_default():\n",
    "        in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)\n",
    "        # the target shape enters the graph as a constant, not as a runtime input\n",
    "        shape_data = constant_op.constant(\n",
    "            shape_data, shape=shape_data.shape, dtype=shape_data.dtype\n",
    "        )\n",
    "        tf.broadcast_to(in_data, shape_data)\n",
    "        # default op names (no name= given): \"Placeholder\" and \"BroadcastTo\"\n",
    "        io_info = {\"in_data\": data, \"in_name\": \"Placeholder:0\", \"out_name\": \"BroadcastTo:0\"}\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_broadcast_to():\n",
    "    \"\"\"Exercise the tensorflow translator on the broadcast_to op.\"\"\"\n",
    "\n",
    "    src_shape, dst_shape = (4, 1, 32, 32), [4, 8, 32, 32]\n",
    "    _test_broadcast_to(src_shape, dst_shape)\n",
    "\n",
    "\n",
    "def _test_fill(in_shape):\n",
    "    \"\"\"Use tf.ones (a Fill op) to create a tensor; the graph has no inputs.\"\"\"\n",
    "\n",
    "    with tf.Graph().as_default():\n",
    "        tf.ones(shape=in_shape, dtype=\"float32\")\n",
    "        # no placeholders exist, so in_name is empty; in_data carries the shape tuple\n",
    "        # (presumably consumed by get_graph_def/verify_model directly -- no feed dict)\n",
    "        io_info = {\"in_data\": in_shape, \"in_name\": [], \"out_name\": \"ones:0\"}\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info, use_out_name=False)\n",
    "\n",
    "\n",
    "def test_fill():\n",
    "    \"\"\"Exercise the tensorflow translator on the fill op.\"\"\"\n",
    "\n",
    "    fill_shape = (6, 32, 64, 64)\n",
    "    _test_fill(fill_shape)\n",
    "\n",
    "\n",
    "def _test_pack(axis, shape, **kwargs):\n",
    "    \"\"\"One iteration of pack (tf.stack) over two identical arange arrays.\"\"\"\n",
    "\n",
    "    a = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)\n",
    "    b = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)\n",
    "\n",
    "    with tf.Graph().as_default():\n",
    "        tf_a = array_ops.placeholder(shape=shape, dtype=\"float32\", name=\"pl_a\")\n",
    "        tf_b = array_ops.placeholder(shape=shape, dtype=\"float32\", name=\"pl_b\")\n",
    "        tf_c = tf.stack([tf_a, tf_b], axis=axis, **kwargs)\n",
    "        # guard against tf.stack lowering to something other than a Pack op\n",
    "        assert tf_c.op.op_def.name == \"Pack\", \"tf.stack() is expected to produce 'Pack' operation\"\n",
    "        io_info = {\"in_data\": [a, b], \"in_name\": [\"pl_a:0\", \"pl_b:0\"], \"out_name\": \"stack:0\"}\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_pack():\n",
    "    \"\"\"Exercise the tensorflow translator on the pack (stack) op.\"\"\"\n",
    "\n",
    "    pack_axis, pack_shape = 3, [3, 2, 1]\n",
    "    _test_pack(pack_axis, pack_shape)\n",
    "\n",
    "\n",
    "def _test_unpack(in_shape, axis):\n",
    "    \"\"\"test operator Unpack\"\"\"\n",
    "    np_data = np.random.uniform(-100, 100, size=in_shape).astype(\"float32\")\n",
    "    tf.reset_default_graph()\n",
    "    with tf.Graph().as_default():\n",
    "        in_data = tf.placeholder(\"float32\", in_shape, name=\"in_data\")\n",
    "        tf.unstack(in_data, axis=axis, name=\"Unpack\")\n",
    "        # only the first unstacked tensor (\"Unpack:0\") is compared\n",
    "        io_info = {\"in_data\": [np_data], \"in_name\": [\"in_data:0\"], \"out_name\": \"Unpack:0\"}\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_unpack():\n",
    "    \"\"\"Exercise the tensorflow translator on the unpack (unstack) op.\"\"\"\n",
    "\n",
    "    _test_unpack(in_shape=(21, 23, 3), axis=2)\n",
    "\n",
    "\n",
    "def _test_einsum(equation, *shape_of_input_tensors):\n",
    "    \"\"\"Test Einsum Op\"\"\"\n",
    "\n",
    "    with tf.Graph().as_default():\n",
    "        inputs_placeholders = []\n",
    "        input_data = []\n",
    "        # one placeholder + one random array per operand shape\n",
    "        for idx, shape in enumerate(shape_of_input_tensors):\n",
    "            input_name = f\"input_{idx}\"\n",
    "            inputs_placeholders.append(\n",
    "                tf.placeholder(shape=shape, dtype=\"float32\", name=input_name)\n",
    "            )\n",
    "            input_data.append(np.random.normal(size=shape).astype(\"float32\"))\n",
    "\n",
    "        result = tf.einsum(equation, *inputs_placeholders)\n",
    "        # names are read off the tensors themselves -- presumably because einsum's\n",
    "        # emitted op name varies with the equation\n",
    "        io_info = {\n",
    "            \"in_data\": input_data,\n",
    "            \"in_name\": [ph.name for ph in inputs_placeholders],\n",
    "            \"out_name\": result.name,\n",
    "        }\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info, use_out_name=False)\n",
    "\n",
    "\n",
    "def test_einsum():\n",
    "    \"\"\"Exercise the tensorflow translator on einsum equations.\"\"\"\n",
    "\n",
    "    einsum_cases = [\n",
    "        (\"ij,jk->ik\", [[2, 3], [3, 5]]),  # matmul\n",
    "        (\"ij,jk\", [[2, 3], [3, 5]]),  # matmul, implicit output\n",
    "        (\"i,i->\", [[2], [2]]),  # dot product\n",
    "        (\"i,j->ij\", [[3], [5]]),  # outer product\n",
    "        (\"ij->ji\", [[2, 3]]),  # transpose\n",
    "        (\"ii->i\", [[3, 3]]),  # diagonal\n",
    "        (\"ii\", [[3, 3]]),  # trace of a square matrix\n",
    "        (\"bij,bjk->bik\", [[7, 5, 3], [7, 3, 2]]),  # batch matmul\n",
    "    ]\n",
    "    for equation, shapes in einsum_cases:\n",
    "        _test_einsum(equation, *shapes)\n",
    "\n",
    "\n",
    "def _test_pad(input_shape, paddings, mode, **kwargs):\n",
    "    \"\"\"One iteration of pad operation with given shape\"\"\"\n",
    "\n",
    "    x = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape)\n",
    "\n",
    "    with tf.Graph().as_default():\n",
    "        in_data = array_ops.placeholder(shape=input_shape, dtype=\"float32\")\n",
    "        pad_values = constant_op.constant(paddings)\n",
    "        _ = tf.pad(in_data, paddings=pad_values, mode=mode, **kwargs)\n",
    "\n",
    "        # the emitted op (and hence its default name) depends on the pad variant:\n",
    "        #   CONSTANT with constant_values -> PadV2, plain CONSTANT -> Pad,\n",
    "        #   anything else -> MirrorPad\n",
    "        if mode == \"CONSTANT\":\n",
    "            if \"constant_values\" in kwargs:\n",
    "                out_name = \"PadV2:0\"\n",
    "            else:\n",
    "                out_name = \"Pad:0\"\n",
    "        else:\n",
    "            out_name = \"MirrorPad:0\"\n",
    "\n",
    "        io_info = {\n",
    "            \"in_data\": x,\n",
    "            \"in_name\": \"Placeholder:0\",\n",
    "            \"out_name\": out_name,\n",
    "        }\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_pad():\n",
    "    \"\"\"Exercise the tensorflow translator on pad ops.\"\"\"\n",
    "\n",
    "    pad_widths = [[1, 1], [2, 2]]\n",
    "    _test_pad((2, 3), pad_widths, mode=\"CONSTANT\")\n",
    "    _test_pad((2, 3), pad_widths, mode=\"CONSTANT\", constant_values=1.0)\n",
    "\n",
    "\n",
    "def test_logical_and():\n",
    "    \"\"\"test tensorflow translator for logical_and\"\"\"\n",
    "\n",
    "    with tf.Graph().as_default():\n",
    "        in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name=\"in1\")\n",
    "        in2 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name=\"in2\")\n",
    "        _ = tf.logical_and(in1, in2, name=\"out\")\n",
    "        # independent random boolean operands\n",
    "        in_data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype(\"bool\")\n",
    "        in_data2 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype(\"bool\")\n",
    "        io_info = {\n",
    "            \"in_data\": [in_data1, in_data2],\n",
    "            \"in_name\": [\"in1:0\", \"in2:0\"],\n",
    "            \"out_name\": \"out:0\",\n",
    "        }\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_logical_or():\n",
    "    \"\"\"test tensorflow translator for logical_or\"\"\"\n",
    "\n",
    "    with tf.Graph().as_default():\n",
    "        in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name=\"in1\")\n",
    "        in2 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name=\"in2\")\n",
    "        _ = tf.logical_or(in1, in2, name=\"out\")\n",
    "        # independent random boolean operands\n",
    "        in_data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype(\"bool\")\n",
    "        in_data2 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype(\"bool\")\n",
    "        io_info = {\n",
    "            \"in_data\": [in_data1, in_data2],\n",
    "            \"in_name\": [\"in1:0\", \"in2:0\"],\n",
    "            \"out_name\": \"out:0\",\n",
    "        }\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_logical_xor():\n",
    "    \"\"\"test tensorflow translator for logical_xor\"\"\"\n",
    "\n",
    "    with tf.Graph().as_default():\n",
    "        in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name=\"in1\")\n",
    "        in2 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name=\"in2\")\n",
    "        _ = tf.logical_xor(in1, in2, name=\"out\")\n",
    "        # independent random boolean operands\n",
    "        in_data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype(\"bool\")\n",
    "        in_data2 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype(\"bool\")\n",
    "        io_info = {\n",
    "            \"in_data\": [in_data1, in_data2],\n",
    "            \"in_name\": [\"in1:0\", \"in2:0\"],\n",
    "            \"out_name\": \"out:0\",\n",
    "        }\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_logical_not():\n",
    "    \"\"\"test tensorflow translator for logical_not\"\"\"\n",
    "\n",
    "    with tf.Graph().as_default():\n",
    "        in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name=\"in1\")\n",
    "        _ = tf.logical_not(in1, name=\"out\")\n",
    "        # single random boolean operand\n",
    "        in_data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype(\"bool\")\n",
    "        io_info = {\n",
    "            \"in_data\": [in_data1],\n",
    "            \"in_name\": [\"in1:0\"],\n",
    "            \"out_name\": \"out:0\",\n",
    "        }\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_where():\n",
    "    \"\"\"test tensorflow translator for where (Select)\n",
    "\n",
    "    The placeholders are declared tf.int32, so the feed data is cast to int32 as\n",
    "    well; the previous cast to uint32 mismatched the declared placeholder dtype.\n",
    "    \"\"\"\n",
    "\n",
    "    with tf.Graph().as_default():\n",
    "        # no explicit tf.Session is needed here -- get_graph_def runs the graph,\n",
    "        # consistent with every sibling test in this notebook\n",
    "        input1 = tf.placeholder(tf.int32, shape=[1, 4, 4, 3], name=\"input1\")\n",
    "        input2 = tf.placeholder(tf.int32, shape=[1, 4, 4, 3], name=\"input2\")\n",
    "        mask = input1 > input2\n",
    "        # tf.where with both x and y lowers to a Select op, hence \"Select:0\"\n",
    "        tf.where(mask, input1 + 1, input2 * 2)\n",
    "        in_data1 = np.random.uniform(0, 10, size=(1, 4, 4, 3)).astype(\"int32\")\n",
    "        in_data2 = np.random.uniform(0, 10, size=(1, 4, 4, 3)).astype(\"int32\")\n",
    "        io_info = {\n",
    "            \"in_data\": [in_data1, in_data2],\n",
    "            \"in_name\": [\"input1:0\", \"input2:0\"],\n",
    "            \"out_name\": \"Select:0\",\n",
    "        }\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def _test_transpose(ishape, axes=None):\n",
    "    \"\"\"One iteration of transpose; perm axes may be omitted.\"\"\"\n",
    "    data = np.random.uniform(size=ishape).astype(np.float32)\n",
    "\n",
    "    with tf.Graph().as_default():\n",
    "        in1 = tf.placeholder(shape=data.shape, dtype=data.dtype, name=\"transpose_data\")\n",
    "\n",
    "        if axes is None:\n",
    "            tf.transpose(in1)\n",
    "        else:\n",
    "            tf.transpose(in1, perm=axes)\n",
    "\n",
    "        # \"transpose:0\" is the default (lower-case) op name TF assigns\n",
    "        io_info = {\n",
    "            \"in_data\": data,\n",
    "            \"in_name\": \"transpose_data:0\",\n",
    "            \"out_name\": \"transpose:0\",\n",
    "        }\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def _test_tranapose_axes_input(ishape, axes):\n",
    "    \"\"\"One iteration of transpose whose perm axes come from a computed tensor.\n",
    "\n",
    "    NOTE(review): the name keeps the historical typo \"tranapose\"; renaming would\n",
    "    require updating its callers in the same notebook.\n",
    "    \"\"\"\n",
    "    data = np.random.uniform(size=ishape).astype(np.float32)\n",
    "    axes_np = np.array(axes).astype(np.int32)\n",
    "\n",
    "    with tf.Graph().as_default():\n",
    "        in1 = tf.placeholder(shape=data.shape, dtype=data.dtype, name=\"transpose_data\")\n",
    "\n",
    "        const1 = tf.constant(axes_np, dtype=tf.int32)\n",
    "\n",
    "        # make axes an input to tf.transpose, but not an input to the graph,\n",
    "        # so it can be extracted with infer_value_simulated\n",
    "        axes = tf.reverse(const1, axis=[-1])\n",
    "        tf.transpose(in1, axes)\n",
    "        io_info = {\n",
    "            \"in_data\": data,\n",
    "            \"in_name\": \"transpose_data:0\",\n",
    "            \"out_name\": \"transpose:0\",\n",
    "        }\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_transpose():\n",
    "    \"\"\"Exercise the tensorflow translator on transpose ops.\"\"\"\n",
    "\n",
    "    _test_transpose((2, 3, 4), axes=(1, 2, 0))\n",
    "    _test_transpose((2, 3, 4))\n",
    "    for ishape, axes in [((2, 3, 4), (1, 2, 0)), ((2, 3, 4, 5), (3, 0, 1, 2))]:\n",
    "        _test_tranapose_axes_input(ishape, axes)\n",
    "\n",
    "\n",
    "def _test_slice_operation_input(input_value, begin_value, size_value):\n",
    "    \"\"\"One iteration of slice with constant begin/size values.\"\"\"\n",
    "    input_data = np.array(input_value, dtype=np.float32)\n",
    "    with tf.Graph().as_default():\n",
    "        input_tensor = tf.placeholder(shape=input_data.shape, dtype=input_data.dtype, name=\"input\")\n",
    "        tf.slice(input_tensor, begin_value, size_value, name=\"slice_output\")\n",
    "        io_info = {\n",
    "            \"in_data\": input_data,\n",
    "            \"in_name\": \"input:0\",\n",
    "            \"out_name\": \"slice_output:0\",\n",
    "        }\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "# NOTE: under pytest this test was marked xfail (\"MSC does not support Tuple of\n",
    "# PrimValue\"). pytest is not imported in this notebook, so the marker is recorded\n",
    "# here rather than via @pytest.mark.xfail, which would raise NameError when the\n",
    "# cell defines the function.\n",
    "def test_slice():\n",
    "    \"\"\"test tensorflow translator for slice (expected to fail in MSC)\"\"\"\n",
    "\n",
    "    _test_slice_operation_input([1, 1], [0], [2])\n",
    "\n",
    "\n",
    "def test_ceil():\n",
    "    \"\"\"test tensorflow translator for ceil\"\"\"\n",
    "\n",
    "    ishape = (1, 3, 10, 10)\n",
    "    inp_array = np.random.uniform(size=ishape).astype(np.float32)\n",
    "    with tf.Graph().as_default():\n",
    "        # no name= given, so TF assigns the default \"Placeholder\"/\"Ceil\" op names\n",
    "        in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)\n",
    "        tf.ceil(in1)\n",
    "        io_info = {\n",
    "            \"in_data\": inp_array,\n",
    "            \"in_name\": \"Placeholder:0\",\n",
    "            \"out_name\": \"Ceil:0\",\n",
    "        }\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_floor():\n",
    "    \"\"\"test tensorflow translator for floor\"\"\"\n",
    "\n",
    "    ishape = (1, 3, 10, 10)\n",
    "    inp_array = np.random.uniform(size=ishape).astype(np.float32)\n",
    "    with tf.Graph().as_default():\n",
    "        # no name= given, so TF assigns the default \"Placeholder\"/\"Floor\" op names\n",
    "        in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)\n",
    "        tf.floor(in1)\n",
    "        io_info = {\n",
    "            \"in_data\": inp_array,\n",
    "            \"in_name\": \"Placeholder:0\",\n",
    "            \"out_name\": \"Floor:0\",\n",
    "        }\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_relu():\n",
    "    \"\"\"test tensorflow translator for relu\"\"\"\n",
    "\n",
    "    ishape = (1, 3, 10, 10)\n",
    "    # range spans negatives so the rectification actually clips values\n",
    "    inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)\n",
    "    with tf.Graph().as_default():\n",
    "        in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)\n",
    "        tf.nn.relu(in1)\n",
    "        io_info = {\n",
    "            \"in_data\": inp_array,\n",
    "            \"in_name\": \"Placeholder:0\",\n",
    "            \"out_name\": \"Relu:0\",\n",
    "        }\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_elu():\n",
    "    \"\"\"test tensorflow translator for elu\"\"\"\n",
    "\n",
    "    ishape = (1, 3, 10, 10)\n",
    "    # range spans negatives to exercise the exponential branch of elu\n",
    "    inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)\n",
    "    with tf.Graph().as_default():\n",
    "        in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)\n",
    "        tf.nn.elu(in1)\n",
    "        io_info = {\n",
    "            \"in_data\": inp_array,\n",
    "            \"in_name\": \"Placeholder:0\",\n",
    "            \"out_name\": \"Elu:0\",\n",
    "        }\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_selu():\n",
    "    \"\"\"test tensorflow translator for selu\"\"\"\n",
    "\n",
    "    ishape = (1, 3, 10, 10)\n",
    "    # range spans negatives to exercise the scaled-exponential branch of selu\n",
    "    inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)\n",
    "    with tf.Graph().as_default():\n",
    "        in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)\n",
    "        tf.nn.selu(in1)\n",
    "        io_info = {\n",
    "            \"in_data\": inp_array,\n",
    "            \"in_name\": \"Placeholder:0\",\n",
    "            \"out_name\": \"Selu:0\",\n",
    "        }\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_tanh():\n",
    "    \"\"\"test tensorflow translator for tanh\"\"\"\n",
    "\n",
    "    ishape = (1, 3, 10, 10)\n",
    "    # symmetric range so both saturation sides of tanh are sampled\n",
    "    inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)\n",
    "    with tf.Graph().as_default():\n",
    "        in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)\n",
    "        tf.nn.tanh(in1)\n",
    "        io_info = {\n",
    "            \"in_data\": inp_array,\n",
    "            \"in_name\": \"Placeholder:0\",\n",
    "            \"out_name\": \"Tanh:0\",\n",
    "        }\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_softmax():\n",
    "    \"\"\"test tensorflow translator for softmax\"\"\"\n",
    "\n",
    "    def check_softmax(in_shape, axis, dtype):\n",
    "        \"\"\"Build and verify a single Softmax graph along the given axis.\"\"\"\n",
    "        np_data = np.random.uniform(-100, 100, size=in_shape).astype(dtype)\n",
    "        tf.reset_default_graph()\n",
    "        with tf.Graph().as_default():\n",
    "            in_data = tf.placeholder(dtype, in_shape, name=\"in_data\")\n",
    "            tf.nn.softmax(in_data, axis=axis, name=\"Softmax\")\n",
    "            io_info = {\n",
    "                \"in_data\": np_data,\n",
    "                \"in_name\": \"in_data:0\",\n",
    "                \"out_name\": \"Softmax:0\",\n",
    "            }\n",
    "            graph_def, golden = get_graph_def(**io_info)\n",
    "        verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "    # cover both an explicit positive axis and the equivalent negative-axis form\n",
    "    check_softmax((2, 3, 5), 2, \"float32\")\n",
    "    check_softmax((2, 3, 5), -1, \"float32\")\n",
    "\n",
    "\n",
    "def test_round():\n",
    "    \"\"\"test tensorflow translator for round\"\"\"\n",
    "\n",
    "    # range spans negatives and positives to exercise rounding on both sides of zero\n",
    "    np_data = np.random.uniform(-10, 10, size=(5, 7)).astype(np.float32)\n",
    "    tf.reset_default_graph()\n",
    "    with tf.Graph().as_default():\n",
    "        in_data = tf.placeholder(tf.float32, (5, 7), name=\"in_data\")\n",
    "        tf.round(in_data, name=\"round\")\n",
    "        io_info = {\n",
    "            \"in_data\": np_data,\n",
    "            \"in_name\": \"in_data:0\",\n",
    "            \"out_name\": \"round:0\",\n",
    "        }\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_abs():\n",
    "    \"\"\"test tensorflow translator for abs\"\"\"\n",
    "\n",
    "    # include negative values so the op actually changes something;\n",
    "    # the previous range (1, 100) was all-positive, making abs an identity\n",
    "    np_data = np.random.uniform(-100, 100, size=(9, 11)).astype(np.float32)\n",
    "    tf.reset_default_graph()\n",
    "    with tf.Graph().as_default():\n",
    "        in_data = tf.placeholder(tf.float32, (9, 11), name=\"in_data\")\n",
    "        tf.math.abs(in_data, name=\"abs\")\n",
    "        io_info = {\n",
    "            \"in_data\": np_data,\n",
    "            \"in_name\": \"in_data:0\",\n",
    "            \"out_name\": \"abs:0\",\n",
    "        }\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_squared_difference():\n",
    "    \"\"\"test tensorflow translator for squared_difference\"\"\"\n",
    "\n",
    "    ishape = (1, 3, 10, 14)\n",
    "    inp_array_a = np.random.uniform(-5, 5, size=ishape).astype(np.float32)\n",
    "    inp_array_b = np.random.uniform(-5, 5, size=ishape).astype(np.float32)\n",
    "    with tf.Graph().as_default():\n",
    "        in1 = tf.placeholder(shape=inp_array_a.shape, dtype=inp_array_a.dtype, name=\"in1\")\n",
    "        in2 = tf.placeholder(shape=inp_array_b.shape, dtype=inp_array_b.dtype, name=\"in2\")\n",
    "        out = tf.math.squared_difference(in1, in2)\n",
    "        # names are read off the tensors instead of hard-coded strings\n",
    "        io_info = {\n",
    "            \"in_data\": [inp_array_a, inp_array_b],\n",
    "            \"in_name\": [in1.name, in2.name],\n",
    "            \"out_name\": out.name,\n",
    "        }\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_sign():\n",
    "    \"\"\"test tensorflow translator for sign\"\"\"\n",
    "\n",
    "    # range (-10, 10) covers negative, near-zero and positive inputs\n",
    "    np_data = np.random.uniform(-10, 10, size=(5, 7, 11)).astype(np.float32)\n",
    "    tf.reset_default_graph()\n",
    "    with tf.Graph().as_default():\n",
    "        in_data = tf.placeholder(tf.float32, (5, 7, 11), name=\"in_data\")\n",
    "        tf.sign(in_data, name=\"sign\")\n",
    "        io_info = {\"in_data\": [np_data], \"in_name\": [\"in_data:0\"], \"out_name\": \"sign:0\"}\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_square():\n",
    "    \"\"\"test tensorflow translator for square\"\"\"\n",
    "\n",
    "    np_data = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)\n",
    "    tf.reset_default_graph()\n",
    "    with tf.Graph().as_default():\n",
    "        in_data = tf.placeholder(tf.float32, (2, 3, 5), name=\"in_data\")\n",
    "        tf.square(in_data, name=\"square\")\n",
    "        # in_data/in_name passed as one-element lists (sibling tests also pass bare values)\n",
    "        io_info = {\"in_data\": [np_data], \"in_name\": [\"in_data:0\"], \"out_name\": \"square:0\"}\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_pow_exp():\n",
    "    \"\"\"test tensorflow translator for pow && exp\"\"\"\n",
    "\n",
    "    # NOTE(review): uniform(-2, 2) allows negative bases with fractional exponents,\n",
    "    # which yield NaN from pow; presumably the comparison tolerates matching NaNs.\n",
    "    np_in1 = np.random.uniform(-2, 2, size=(5, 7, 11)).astype(np.float32)\n",
    "    np_in2 = np.random.uniform(-2, 2, size=(5, 7, 11)).astype(np.float32)\n",
    "    tf.reset_default_graph()\n",
    "    with tf.Graph().as_default():\n",
    "        in1 = tf.placeholder(tf.float32, (5, 7, 11), name=\"in1\")\n",
    "        in2 = tf.placeholder(tf.float32, (5, 7, 11), name=\"in2\")\n",
    "        # exp(pow(in1, in2)): two chained ops; only the final exp output is checked\n",
    "        in3 = tf.pow(in1, in2, name=\"pow\")\n",
    "        _ = tf.exp(in3, name=\"exp\")\n",
    "        io_info = {\"in_data\": [np_in1, np_in2], \"in_name\": [\"in1:0\", \"in2:0\"], \"out_name\": \"exp:0\"}\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_unary():\n",
    "    \"\"\"test tensorflow translator for unary\"\"\"\n",
    "\n",
    "    def _test_unary(op, a_min=1, a_max=5, dtype=np.float32):\n",
    "        \"\"\"test unary operators\"\"\"\n",
    "        np_data = np.random.uniform(a_min, a_max, size=(2, 3, 5)).astype(dtype)\n",
    "        tf.reset_default_graph()\n",
    "        with tf.Graph().as_default():\n",
    "            in_data = tf.placeholder(dtype, (2, 3, 5), name=\"in_data\")\n",
    "            out = op(in_data)\n",
    "            # out_name is read off the produced tensor since each op auto-names itself\n",
    "            io_info = {\"in_data\": [np_data], \"in_name\": [\"in_data:0\"], \"out_name\": out.name}\n",
    "            graph_def, golden = get_graph_def(**io_info)\n",
    "        verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "    # ops with restricted input domains get explicit (a_min, a_max) sampling ranges\n",
    "    _test_unary(tf.acos, -1, 1)\n",
    "    _test_unary(tf.asin, -1, 1)\n",
    "    _test_unary(tf.atanh, -1, 1)\n",
    "    _test_unary(tf.sinh)\n",
    "    _test_unary(tf.cosh)\n",
    "    _test_unary(tf.acosh)\n",
    "    _test_unary(tf.asinh)\n",
    "    _test_unary(tf.atan)\n",
    "    _test_unary(tf.sin)\n",
    "    _test_unary(tf.cos)\n",
    "    _test_unary(tf.tan)\n",
    "    _test_unary(tf.tanh)\n",
    "    _test_unary(tf.erf)\n",
    "    _test_unary(tf.log)\n",
    "\n",
    "\n",
    "def test_atan2():\n",
    "    \"\"\"test tensorflow translator for atan2\"\"\"\n",
    "\n",
    "    # NOTE(review): only this and a few sibling tests call disable_eager_execution;\n",
    "    # it appears defensive -- v2 behavior is already disabled in the import cell\n",
    "    tf.disable_eager_execution()\n",
    "    np_data_1 = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)\n",
    "    np_data_2 = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)\n",
    "    tf.reset_default_graph()\n",
    "    with tf.Graph().as_default():\n",
    "        in_data_1 = tf.placeholder(tf.float32, (2, 3, 5), name=\"in_data_1\")\n",
    "        in_data_2 = tf.placeholder(tf.float32, (2, 3, 5), name=\"in_data_2\")\n",
    "        tf.atan2(in_data_1, in_data_2, name=\"atan2\")\n",
    "        io_info = {\n",
    "            \"in_data\": [np_data_1, np_data_2],\n",
    "            \"in_name\": [\"in_data_1:0\", \"in_data_2:0\"],\n",
    "            \"out_name\": \"atan2:0\",\n",
    "        }\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_expm1():\n",
    "    \"\"\"test tensorflow translator for expm1\"\"\"\n",
    "\n",
    "    def _test_expm1(shape):\n",
    "        \"\"\"Build and verify a single Expm1 graph of the given shape.\"\"\"\n",
    "        tf.disable_eager_execution()\n",
    "        np_data = np.random.uniform(1, 10, size=shape).astype(np.float32)\n",
    "        tf.reset_default_graph()\n",
    "        with tf.Graph().as_default():\n",
    "            in_data = tf.placeholder(tf.float32, shape, name=\"in_data\")\n",
    "            tf.expm1(in_data, name=\"expm1\")\n",
    "            io_info = {\"in_data\": [np_data], \"in_name\": [\"in_data:0\"], \"out_name\": \"expm1:0\"}\n",
    "            graph_def, golden = get_graph_def(**io_info)\n",
    "        verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "    _test_expm1([2, 5, 2, 5])\n",
    "\n",
    "\n",
    "def test_softsign():\n",
    "    \"\"\"test tensorflow translator for softsign\"\"\"\n",
    "\n",
    "    def _test_softsign(shape):\n",
    "        \"\"\"Build and verify a single Softsign graph of the given shape.\"\"\"\n",
    "        tf.disable_eager_execution()\n",
    "        np_data = np.random.uniform(1, 100, size=shape).astype(np.float32)\n",
    "        tf.reset_default_graph()\n",
    "        with tf.Graph().as_default():\n",
    "            in_data = tf.placeholder(tf.float32, shape, name=\"in_data\")\n",
    "            tf.nn.softsign(in_data, name=\"softsign\")\n",
    "            io_info = {\"in_data\": [np_data], \"in_name\": [\"in_data:0\"], \"out_name\": \"softsign:0\"}\n",
    "            graph_def, golden = get_graph_def(**io_info)\n",
    "        verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "    _test_softsign([2, 5, 2, 5])\n",
    "\n",
    "\n",
    "def test_rint():\n",
    "    \"\"\"test tensorflow translator for rint\"\"\"\n",
    "\n",
    "    def _test_rint(shape):\n",
    "        \"\"\"Build and verify a single Rint graph of the given shape.\"\"\"\n",
    "        tf.disable_eager_execution()\n",
    "        # symmetric range so rounding is exercised on both sides of zero\n",
    "        np_data = np.random.uniform(-100, 100, size=shape).astype(np.float32)\n",
    "        tf.reset_default_graph()\n",
    "        with tf.Graph().as_default():\n",
    "            in_data = tf.placeholder(tf.float32, shape, name=\"in_data\")\n",
    "            tf.math.rint(in_data, name=\"rint\")\n",
    "            io_info = {\"in_data\": [np_data], \"in_name\": [\"in_data:0\"], \"out_name\": \"rint:0\"}\n",
    "            graph_def, golden = get_graph_def(**io_info)\n",
    "        verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "    _test_rint([2, 5, 2, 5])\n",
    "\n",
    "\n",
    "def test_negative():\n",
    "    \"\"\"Test the tensorflow translator on a graph with a single negative op.\"\"\"\n",
    "\n",
    "    shape = (224, 224, 3)\n",
    "    data = np.random.uniform(-100, 255, size=shape).astype(np.float32)\n",
    "    tf.reset_default_graph()\n",
    "    with tf.Graph().as_default():\n",
    "        placeholder = tf.placeholder(tf.float32, shape, name=\"in_data\")\n",
    "        tf.negative(placeholder, name=\"negative\")\n",
    "        io_info = {\"in_data\": [data], \"in_name\": [\"in_data:0\"], \"out_name\": \"negative:0\"}\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    # Compare the translated model against the tensorflow golden output.\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_log_softmax():\n",
    "    \"\"\"Test the tensorflow translator on a graph with a single log_softmax op.\"\"\"\n",
    "\n",
    "    shape = (9, 11)\n",
    "    data = np.random.uniform(1, 100, size=shape).astype(np.float32)\n",
    "    tf.reset_default_graph()\n",
    "    with tf.Graph().as_default():\n",
    "        placeholder = tf.placeholder(tf.float32, shape, name=\"in_data\")\n",
    "        tf.math.log_softmax(placeholder, name=\"LogSoftmax\")\n",
    "        io_info = {\"in_data\": [data], \"in_name\": [\"in_data:0\"], \"out_name\": \"LogSoftmax:0\"}\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_softplus():\n",
    "    \"\"\"Test the tensorflow translator on a graph with a single softplus op.\"\"\"\n",
    "\n",
    "    shape = (2, 3, 5)\n",
    "    data = np.random.uniform(1, 10, size=shape).astype(np.float32)\n",
    "    tf.reset_default_graph()\n",
    "    with tf.Graph().as_default():\n",
    "        placeholder = tf.placeholder(tf.float32, shape, name=\"in_data\")\n",
    "        tf.nn.softplus(placeholder, name=\"softplus\")\n",
    "        io_info = {\"in_data\": [data], \"in_name\": [\"in_data:0\"], \"out_name\": \"softplus:0\"}\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_rsqrt():\n",
    "    \"\"\"Test the tensorflow translator on a graph with a single rsqrt op.\"\"\"\n",
    "\n",
    "    shape = (5, 7, 11)\n",
    "    # Strictly positive data keeps rsqrt well-defined.\n",
    "    data = np.random.uniform(1, 100, size=shape).astype(np.float32)\n",
    "    tf.reset_default_graph()\n",
    "    with tf.Graph().as_default():\n",
    "        placeholder = tf.placeholder(tf.float32, shape, name=\"in_data\")\n",
    "        tf.rsqrt(placeholder, name=\"rsqrt\")\n",
    "        io_info = {\"in_data\": [data], \"in_name\": [\"in_data:0\"], \"out_name\": \"rsqrt:0\"}\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_sqrt():\n",
    "    \"\"\"Test the tensorflow translator on a graph with a single sqrt op.\"\"\"\n",
    "\n",
    "    shape = (5, 7, 11)\n",
    "    # Strictly positive data keeps sqrt well-defined.\n",
    "    data = np.random.uniform(1, 100, size=shape).astype(np.float32)\n",
    "    tf.reset_default_graph()\n",
    "    with tf.Graph().as_default():\n",
    "        placeholder = tf.placeholder(tf.float32, shape, name=\"in_data\")\n",
    "        tf.sqrt(placeholder, name=\"sqrt\")\n",
    "        io_info = {\"in_data\": [data], \"in_name\": [\"in_data:0\"], \"out_name\": \"sqrt:0\"}\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_mean():\n",
    "    \"\"\"Test the tensorflow translator on keras-backend mean reductions.\"\"\"\n",
    "\n",
    "    def check_mean(ishape, **kwargs):\n",
    "        data = np.random.uniform(size=ishape).astype(np.float32)\n",
    "        with tf.Graph().as_default():\n",
    "            # No explicit name: the placeholder keeps tensorflow's default \"Placeholder\".\n",
    "            placeholder = tf.placeholder(shape=data.shape, dtype=data.dtype)\n",
    "            tf.keras.backend.mean(placeholder, **kwargs)\n",
    "            io_info = {\"in_data\": data, \"in_name\": \"Placeholder:0\", \"out_name\": \"Mean:0\"}\n",
    "            graph_def, golden = get_graph_def(**io_info)\n",
    "        verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "    check_mean((10, 8, 16, 32))\n",
    "    check_mean((10, 8, 16, 32), axis=(2, 3))\n",
    "    check_mean((10, 8, 16, 32), axis=(1, 2), keepdims=True)\n",
    "\n",
    "\n",
    "def test_reduce():\n",
    "    \"\"\"test tensorflow translator for reduce\"\"\"\n",
    "\n",
    "    def _check_op(tf_op, ishape, axis, keepdims):\n",
    "        # Build one single-reduce-op graph and verify it against the golden output.\n",
    "        tf.reset_default_graph()\n",
    "        np_data = np.random.uniform(size=ishape).astype(\"float32\")\n",
    "        # reduce_prod is special-cased: flatten to 2-D and reduce only axis 1,\n",
    "        # presumably to keep the product of many values numerically stable — TODO confirm.\n",
    "        if tf_op == tf.math.reduce_prod:\n",
    "            axis = 1\n",
    "            np_data = np_data.reshape(1, -1)\n",
    "        with tf.Graph().as_default():\n",
    "            in_data = tf.placeholder(shape=np_data.shape, dtype=\"float32\", name=\"in_data\")\n",
    "            reduce_op = tf_op(in_data, axis=axis, keepdims=keepdims, name=\"reduce_op\")\n",
    "            # reduce_op.name already carries the \":0\" output-tensor suffix.\n",
    "            io_info = {\"in_data\": np_data, \"in_name\": \"in_data:0\", \"out_name\": reduce_op.name}\n",
    "            graph_def, golden = get_graph_def(**io_info)\n",
    "        verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "    def _test_math_op(op):\n",
    "        # One case without and one with keepdims; axis=(-1) is just the int -1.\n",
    "        _check_op(op, (8, 16, 32), axis=(-1), keepdims=False)\n",
    "        _check_op(op, (1, 8, 8, 3), axis=(2, 3), keepdims=True)\n",
    "\n",
    "    _test_math_op(tf.math.reduce_max)\n",
    "    _test_math_op(tf.math.reduce_min)\n",
    "    _test_math_op(tf.math.reduce_prod)\n",
    "    _test_math_op(tf.math.reduce_variance)\n",
    "    _test_math_op(tf.math.reduce_std)\n",
    "    _test_math_op(tf.math.reduce_logsumexp)\n",
    "    # reduce_euclidean_norm was added in TF 1.15, so gate on the runtime version.\n",
    "    # NOTE(review): `package_version` is assumed to be packaging.version imported\n",
    "    # in an earlier cell — confirm it is in scope.\n",
    "    if package_version.parse(tf.VERSION) >= package_version.parse(\"1.15.0\"):\n",
    "        _test_math_op(tf.math.reduce_euclidean_norm)\n",
    "\n",
    "\n",
    "def _test_rel_op(data, func):\n",
    "    \"\"\"Build a graph applying the relational `func` to two inputs and verify it.\"\"\"\n",
    "    lhs, rhs = data[0], data[1]\n",
    "    with tf.Graph().as_default():\n",
    "        in1 = tf.placeholder(shape=lhs.shape, dtype=lhs.dtype, name=\"in1\")\n",
    "        in2 = tf.placeholder(shape=rhs.shape, dtype=rhs.dtype, name=\"in2\")\n",
    "        result = func(in1, in2, name=\"op\")\n",
    "        # Cast the boolean comparison result to a numeric output tensor.\n",
    "        _ = tf.cast(result, tf.int32, name=\"out1\")\n",
    "        io_info = {\n",
    "            \"in_data\": [lhs, rhs],\n",
    "            \"in_name\": [\"in1:0\", \"in2:0\"],\n",
    "            \"out_name\": \"out1:0\",\n",
    "        }\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_rel_ops():\n",
    "    \"\"\"Test the tensorflow translator on all six relational comparison ops.\"\"\"\n",
    "\n",
    "    t_1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n",
    "    t_2 = np.array([[9, 8, 7], [6, 5, 4], [3, 2, 1]])\n",
    "    for rel_func in (\n",
    "        math_ops.less,\n",
    "        math_ops.greater,\n",
    "        math_ops.less_equal,\n",
    "        math_ops.greater_equal,\n",
    "        math_ops.equal,\n",
    "        math_ops.not_equal,\n",
    "    ):\n",
    "        _test_rel_op([t_1, t_2], rel_func)\n",
    "\n",
    "\n",
    "def _test_expand_dims(data, axis):\n",
    "    \"\"\"Build a graph with a single expand_dims op and verify the translation.\"\"\"\n",
    "    with tf.Graph().as_default():\n",
    "        placeholder = tf.placeholder(shape=data.shape, dtype=data.dtype, name=\"in1\")\n",
    "        expanded = tf.expand_dims(placeholder, axis)\n",
    "        # Tensor .name attributes already carry the \":0\" output suffix.\n",
    "        io_info = {\"in_data\": data, \"in_name\": placeholder.name, \"out_name\": expanded.name}\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_expand_dims():\n",
    "    \"\"\"Test the tensorflow translator on expand_dims with negative and positive axes.\"\"\"\n",
    "\n",
    "    for data, axis in ((np.array([1]), -1), (np.array([[1], [2]]), 1)):\n",
    "        _test_expand_dims(data, axis)\n",
    "\n",
    "\n",
    "def test_maximum():\n",
    "    \"\"\"Test the tensorflow translator on an elementwise maximum op.\"\"\"\n",
    "\n",
    "    def check_maximum(lh_shape, rh_shape, dtype):\n",
    "        tf.reset_default_graph()\n",
    "        lhs = np.random.uniform(size=lh_shape).astype(dtype)\n",
    "        rhs = np.random.uniform(size=rh_shape).astype(dtype)\n",
    "        with tf.Graph().as_default():\n",
    "            lft_data = tf.placeholder(shape=lhs.shape, dtype=dtype, name=\"lft_data\")\n",
    "            rgt_data = tf.placeholder(shape=rhs.shape, dtype=dtype, name=\"rgt_data\")\n",
    "            tf.math.maximum(lft_data, rgt_data, name=\"maximum\")\n",
    "            io_info = {\n",
    "                \"in_data\": [lhs, rhs],\n",
    "                \"in_name\": [\"lft_data:0\", \"rgt_data:0\"],\n",
    "                \"out_name\": \"maximum:0\",\n",
    "            }\n",
    "            graph_def, golden = get_graph_def(**io_info)\n",
    "        verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "    check_maximum((10, 8, 16, 32), (10, 8, 16, 32), dtype=\"float32\")\n",
    "\n",
    "\n",
    "def test_minimum():\n",
    "    \"\"\"Test the tensorflow translator on an elementwise minimum op.\"\"\"\n",
    "\n",
    "    def check_minimum(lh_shape, rh_shape, dtype):\n",
    "        tf.reset_default_graph()\n",
    "        lhs = np.random.uniform(size=lh_shape).astype(dtype)\n",
    "        rhs = np.random.uniform(size=rh_shape).astype(dtype)\n",
    "        with tf.Graph().as_default():\n",
    "            lft_data = tf.placeholder(shape=lhs.shape, dtype=dtype, name=\"lft_data\")\n",
    "            rgt_data = tf.placeholder(shape=rhs.shape, dtype=dtype, name=\"rgt_data\")\n",
    "            tf.math.minimum(lft_data, rgt_data, name=\"minimum\")\n",
    "            io_info = {\n",
    "                \"in_data\": [lhs, rhs],\n",
    "                \"in_name\": [\"lft_data:0\", \"rgt_data:0\"],\n",
    "                \"out_name\": \"minimum:0\",\n",
    "            }\n",
    "            graph_def, golden = get_graph_def(**io_info)\n",
    "        verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "    check_minimum((10, 8, 16, 32), (10, 8, 16, 32), dtype=\"float32\")\n",
    "\n",
    "\n",
    "def _test_add_n(inputs):\n",
    "    \"\"\"Build a graph summing `inputs` with add_n and verify the translation.\n",
    "\n",
    "    NOTE(review): `inputs` may be a list/tuple of arrays or a single ndarray;\n",
    "    in the ndarray case iteration yields its sub-arrays along axis 0, so each\n",
    "    leading-axis slice becomes a separate add_n operand.\n",
    "    \"\"\"\n",
    "    tf.reset_default_graph()\n",
    "    with tf.Graph().as_default():\n",
    "        temp = []\n",
    "        for each in inputs:\n",
    "            # No explicit names: placeholders get tensorflow defaults\n",
    "            # (\"Placeholder\", \"Placeholder_1\", ...), recovered via .name below.\n",
    "            temp.append(tf.placeholder(shape=each.shape, dtype=each.dtype))\n",
    "        output = tf.add_n(temp)\n",
    "        io_info = {\n",
    "            \"in_data\": list(inputs),\n",
    "            \"in_name\": [each.name for each in temp],\n",
    "            \"out_name\": output.name,\n",
    "        }\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_add_n():\n",
    "    \"\"\"Test the tensorflow translator on add_n with int32 and float32 operands.\"\"\"\n",
    "\n",
    "    x_in = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)\n",
    "    y_in = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)\n",
    "    z_in = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)\n",
    "    m_dim, n_dim, o_dim = x_in.astype(np.float32), y_in.astype(np.float32), z_in.astype(np.float32)\n",
    "    # A bare ndarray case iterates along axis 0 inside _test_add_n,\n",
    "    # so a single array also exercises multi-operand add_n.\n",
    "    cases = (\n",
    "        x_in,\n",
    "        [x_in, y_in],\n",
    "        (x_in, y_in, z_in),\n",
    "        m_dim,\n",
    "        [m_dim, n_dim],\n",
    "        (m_dim, n_dim, o_dim),\n",
    "    )\n",
    "    for case in cases:\n",
    "        _test_add_n(case)\n",
    "\n",
    "\n",
    "def _test_identityn(data_np_list):\n",
    "    \"\"\"Build a graph applying identity_n to all inputs and verify the translation.\"\"\"\n",
    "    with tf.Graph().as_default():\n",
    "        data_tensors = []\n",
    "        data_tensors_name = []\n",
    "        for index, data_np in enumerate(data_np_list):\n",
    "            tensor_name = f\"data_{index}\"\n",
    "            # Record the output-tensor form of each placeholder name (\"data_N:0\").\n",
    "            data_tensors_name.append(tensor_name + \":0\")\n",
    "            data_tensors.append(\n",
    "                tf.placeholder(shape=data_np.shape, dtype=str(data_np.dtype), name=tensor_name)\n",
    "            )\n",
    "\n",
    "        # identity_n returns one output tensor per input tensor.\n",
    "        output = tf.identity_n(data_tensors)\n",
    "        output_names = [out.name for out in output]\n",
    "        io_info = {\"in_data\": data_np_list, \"in_name\": data_tensors_name, \"out_name\": output_names}\n",
    "        graph_def, golden = get_graph_def(**io_info)\n",
    "    # use_out_name=False: presumably makes verify_model match outputs positionally\n",
    "    # instead of by name — confirm against verify_model's definition.\n",
    "    verify_model(graph_def, golden, **io_info, use_out_name=False)\n",
    "\n",
    "\n",
    "def test_identityn():\n",
    "    \"\"\"Test the tensorflow translator on identity_n with varying operand counts.\"\"\"\n",
    "\n",
    "    three_inputs = [\n",
    "        np.array([[1, 1], [0, 3], [0, 1], [2, 0], [3, 1]], dtype=np.int64),\n",
    "        np.array([1, 2, 3, 4, 5], dtype=np.int64),\n",
    "        np.array([5, 6], dtype=np.int64),\n",
    "    ]\n",
    "    # The second case mixes int64 arrays with a boolean array.\n",
    "    four_inputs = [\n",
    "        np.array([[1, 1], [0, 3], [2, 0], [3, 1]], dtype=np.int64),\n",
    "        np.array([1, 2, 3, 4], dtype=np.int64),\n",
    "        np.array([5, 6], dtype=np.int64),\n",
    "        np.array([True, False, True]),\n",
    "    ]\n",
    "    for data_np_list in (three_inputs, four_inputs):\n",
    "        _test_identityn(data_np_list)\n",
    "\n",
    "\n",
    "def _test_infinity(tf_op, name):\n",
    "    \"\"\"Verify the translator for elementwise float-check ops (isinf/isfinite/isnan).\n",
    "\n",
    "    Builds a fresh single-op graph per dtype and verifies each translated model.\n",
    "    \"\"\"\n",
    "\n",
    "    # Only float types are allowed in Tensorflow for isfinite and isinf\n",
    "    # float16 is failing on cuda\n",
    "    tf_dtypes = [\"float32\", \"float64\"]  # pylint: disable=redefined-outer-name\n",
    "    for tf_dtype in tf_dtypes:\n",
    "        shape = (8, 8)\n",
    "        data = np.random.uniform(size=shape).astype(tf_dtype)\n",
    "        # Sprinkle inf and nan into the data so the checked op sees mixed values.\n",
    "        data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.inf\n",
    "        data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.nan\n",
    "\n",
    "        tf.reset_default_graph()\n",
    "        # Build inside an explicit graph context, consistent with the other tests.\n",
    "        with tf.Graph().as_default():\n",
    "            in_data = tf.placeholder(tf_dtype, shape, name=\"in_data\")\n",
    "            tf_op(in_data, name=name)\n",
    "            io_info = {\"in_data\": data, \"in_name\": \"in_data:0\", \"out_name\": f\"{name}:0\"}\n",
    "            graph_def, golden = get_graph_def(**io_info)\n",
    "        # Fix: verify inside the loop so every dtype is checked, not only the last one.\n",
    "        verify_model(graph_def, golden, **io_info)\n",
    "\n",
    "\n",
    "def test_infinity():\n",
    "    \"\"\"Test the tensorflow translator on isinf/isfinite/isnan checks.\"\"\"\n",
    "\n",
    "    for tf_op, op_name in ((tf.is_inf, \"isinf\"), (tf.is_finite, \"isfinite\"), (tf.is_nan, \"isnan\")):\n",
    "        _test_infinity(tf_op, op_name)\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "xxx",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
