{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 解读 {class}`~tvm.relay.analysis._ffi_api._test_type_solver`\n",
    "\n",
    "参考：`tvm/tests/python/relay/test_type_solver.py` & `tvm/src/relay/analysis/type_solver.h`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import testing"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "import tvm\n",
    "from tvm import relay\n",
    "from tvm.relay import testing\n",
    "\n",
    "import numpy as np"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def make_rel(name, args, num_inputs=None, attrs=None):\n",
    "    func = tvm.ir.EnvFunc.get(\"tvm.relay.type_relation.\" + name)\n",
    "    if num_inputs is None:\n",
    "        num_inputs = len(args) - 1\n",
    "    return relay.ty.TypeRelation(func, args, num_inputs, attrs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[0;31mInit signature:\u001b[0m \u001b[0mrelay\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mty\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTypeRelation\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfunc\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnum_inputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mattrs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mSource:\u001b[0m        \n",
      "\u001b[0;34m@\u001b[0m\u001b[0mtvm\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_ffi\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mregister_object\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"TypeRelation\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\n",
      "\u001b[0;34m\u001b[0m\u001b[0;32mclass\u001b[0m \u001b[0mTypeRelation\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mTypeConstraint\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\n",
      "\u001b[0;34m\u001b[0m    \u001b[0;34m\"\"\"User defined type relation, it is an input-output relation on types.\u001b[0m\n",
      "\u001b[0;34m\u001b[0m\n",
      "\u001b[0;34m    TypeRelation is more generalized than TypeCall as it allows inference\u001b[0m\n",
      "\u001b[0;34m     of both inputs and outputs.\u001b[0m\n",
      "\u001b[0;34m\u001b[0m\n",
      "\u001b[0;34m    Parameters\u001b[0m\n",
      "\u001b[0;34m    ----------\u001b[0m\n",
      "\u001b[0;34m    func : EnvFunc\u001b[0m\n",
      "\u001b[0;34m        User defined relation function.\u001b[0m\n",
      "\u001b[0;34m\u001b[0m\n",
      "\u001b[0;34m    args : [tvm.ir.Type]\u001b[0m\n",
      "\u001b[0;34m        List of types to the func.\u001b[0m\n",
      "\u001b[0;34m\u001b[0m\n",
      "\u001b[0;34m    num_inputs : int\u001b[0m\n",
      "\u001b[0;34m        Number of input arguments in args,\u001b[0m\n",
      "\u001b[0;34m        this act as a hint for type inference.\u001b[0m\n",
      "\u001b[0;34m\u001b[0m\n",
      "\u001b[0;34m    attrs : Attrs\u001b[0m\n",
      "\u001b[0;34m        The attribute attached to the relation information\u001b[0m\n",
      "\u001b[0;34m\u001b[0m\n",
      "\u001b[0;34m    Returns\u001b[0m\n",
      "\u001b[0;34m    -------\u001b[0m\n",
      "\u001b[0;34m    type_relation : tvm.ir.TypeRelation\u001b[0m\n",
      "\u001b[0;34m        The type relation.\u001b[0m\n",
      "\u001b[0;34m    \"\"\"\u001b[0m\u001b[0;34m\u001b[0m\n",
      "\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\n",
      "\u001b[0;34m\u001b[0m    \u001b[0;32mdef\u001b[0m \u001b[0m__init__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnum_inputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mattrs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\n",
      "\u001b[0;34m\u001b[0m        \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__init_handle_by_constructor__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_ffi_api\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTypeRelation\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnum_inputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mattrs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mFile:\u001b[0m           /media/pc/data/lxw/ai/tvm/python/tvm/ir/type_relation.py\n",
      "\u001b[0;31mType:\u001b[0m           type\n",
      "\u001b[0;31mSubclasses:\u001b[0m     "
     ]
    }
   ],
   "source": [
    "relay.ty.TypeRelation??"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "{class}`~tvm.ir.type_relation.TypeRelation` 类，继承自 `TypeConstraint`。它表示用户定义的类型关系，是类型上的输入输出关系。\n",
    "\n",
    "{class}`~tvm.ir.type_relation.TypeRelation` 比 {class}`~tvm.ir.type_relation.TypeCall` 更通用，因为它允许推断输入和输出。\n",
    "\n",
    "参数：\n",
    "- `func`: {class}`~tvm.ir.base.EnvFunc`，用户定义的关系函数。\n",
    "- `args`: `[tvm.ir.Type]`，要传递给 `func` 的类型列表。\n",
    "- `num_inputs`: `int`，`args` 中的输入参数数量，这作为类型推断的提示。\n",
    "- `attrs`: `Attrs`，附加到关系信息的属性。\n",
    "\n",
    "返回值：\n",
    "- `type_relation`: `tvm.ir.TypeRelation`，类型关系。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def make_solver():\n",
    "    solver = relay.analysis._ffi_api._test_type_solver()\n",
    "    solver.Solve = solver(\"Solve\")\n",
    "    solver.Unify = solver(\"Unify\")\n",
    "    solver.Resolve = solver(\"Resolve\")\n",
    "    solver.AddConstraint = solver(\"AddConstraint\")\n",
    "\n",
    "    def gen_type(name, args, out=None):\n",
    "        out = out if out else relay.ty.IncompleteType()\n",
    "        solver.AddConstraint(make_rel(name, args + [out]))\n",
    "        return out\n",
    "\n",
    "    solver.gen_type = gen_type\n",
    "    return solver"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "```c++\n",
    "\n",
    "/*!\n",
    " * \\brief Interface of type solver used in type inference.\n",
    " *\n",
    " * TypeSolver works on a list of constraints among incomplete types.\n",
    " * The user will populate the constraints by AddConstraint and Assign.\n",
    " * Then we can call Solve to trying to resolve the unknown.\n",
    " *\n",
    " * This can be viewed as \"type program(computational graph)\" of types, where\n",
    " * the type constraint are operators of the graph and the incomplete\n",
    " * types are intermediate value of the graph.\n",
    " * If all the input types are concretely known, we should be able to\n",
    " * just run a forward pass on the \"type program\" to get all the types.\n",
    " *\n",
    " * The list of constraints representation means we are storing it as a bipartite\n",
    " * graph instead of a DAG. This is because some constraints might go both direction.\n",
    " * TypeSolver could take advantage of bidirectional constraints to deduce input\n",
    " * value given output ones. Never-the-less, we should keep in mind that\n",
    " * there is a \"forward direction\" that the TypeSolver should take advantage of.\n",
    " */\n",
    "class TypeSolver {\n",
    " public:\n",
    "  TypeSolver(const GlobalVar& current_func, DiagnosticContext diag_ctx);\n",
    "  ~TypeSolver();\n",
    "  /*!\n",
    "   * \\brief Add a type constraint to the solver.\n",
    "   * \\param constraint The constraint to be added.\n",
    "   * \\param location The location at which the constraint was incurred.\n",
    "   */\n",
    "  void AddConstraint(const TypeConstraint& constraint, const Span& span);\n",
    "  /*!\n",
    "   * \\brief Resolve type to the solution type in the solver.\n",
    "   * \\param type The type to be resolved.\n",
    "   * \\return The resolved type.\n",
    "   */\n",
    "  Type Resolve(const Type& type);\n",
    "  /*!\n",
    "   * \\brief Start to solve the types using the current known information.\n",
    "   * \\return Whether all the incomplete types has been fully resolved.\n",
    "   */\n",
    "  bool Solve();\n",
    "  /*!\n",
    "   * \\brief Unify lhs and rhs.\n",
    "   * \\param lhs The left operand.\n",
    "   * \\param rhs The right operand\n",
    "   * \\param location The location at which the unification problem arose.\n",
    "   */\n",
    "  Type Unify(const Type& lhs, const Type& rhs, const Span& span, bool assign_lhs = true,\n",
    "             bool assign_rhs = true);\n",
    "  /*!\n",
    "   * \\brief Report a diagnostic.\n",
    "   * \\param diag The diagnostic to report.\n",
    "   */\n",
    "  void Emit(const Diagnostic& diag) { diag_ctx_.Emit(diag); }\n",
    "```"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "这段代码是类型求解器（TypeSolver）的接口定义。类型求解器用于求解类型之间的约束关系，以确定未知类型的具体值。\n",
    "\n",
    "该接口包含以下方法：\n",
    "- `AddConstraint`: 向求解器中添加类型约束。参数包括要添加的约束和约束发生的位置。\n",
    "- `Resolve`: 将给定的类型解析为解决方案类型。参数是要解析的类型，返回解析后的类型。\n",
    "- `Solve`: 使用当前已知信息开始解决类型。返回是否所有不完整类型都已完全解析。\n",
     "- `Unify`: 统一 lhs 和 rhs。参数包括左操作数、右操作数以及统一问题出现的位置（`span`）。还可以通过 `assign_lhs` 和 `assign_rhs`（默认均为 `true`）指定是否将统一结果赋给 lhs 和 rhs。\n",
    "- `Emit`: 报告诊断信息。参数是要报告的诊断信息。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 广播的类型推断(solver)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "solver = make_solver()\n",
    "t0 = relay.ty.TensorType((10, 20), \"float32\")\n",
    "t1 = relay.ty.TensorType((10, 1), \"float32\")\n",
    "tc = relay.ty.TensorType((10, 1, 1), \"float32\")\n",
    "t2 = solver.gen_type(\"Broadcast\", [t0, t1])\n",
    "t3 = solver.gen_type(\"Identity\", [t2])\n",
    "t4 = solver.gen_type(\"Broadcast\", [t3, tc])\n",
    "assert solver.Solve()\n",
    "assert solver.Resolve(t2) == relay.ty.TensorType((10, 20), \"float32\")\n",
    "assert solver.Resolve(t4) == relay.ty.TensorType((10, 10, 20), \"float32\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 其他类型推断(solver)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def test_backward_solving():\n",
    "    solver = make_solver()\n",
    "    t0 = relay.ty.TensorType((10, 20), \"float32\")\n",
    "    tc = relay.ty.TensorType((10, 1, 1), \"float32\")\n",
    "    t1 = relay.ty.IncompleteType()\n",
    "    t3 = solver.gen_type(\"Broadcast\", [t0, t1])\n",
    "    t2 = solver.gen_type(\"Identity\", [t1], out=tc)\n",
    "    assert solver.Solve()\n",
    "    assert solver.Resolve(t3) == relay.ty.TensorType((10, 10, 20), \"float32\")\n",
    "\n",
    "\n",
    "def test_unify_tuple():\n",
    "    solver = make_solver()\n",
    "    t1 = relay.ty.IncompleteType()\n",
    "    t2 = relay.ty.IncompleteType()\n",
    "    t3 = relay.ty.TensorType((10, 20), \"float32\")\n",
    "\n",
    "    tup1 = relay.ty.TupleType([t1, t2])\n",
    "    tup2 = relay.ty.TupleType([t3, t3])\n",
    "\n",
    "    unified = solver.Unify(tup1, tup2)\n",
    "    assert unified == tup2\n",
    "\n",
    "\n",
    "def test_unify_global_type_var():\n",
    "    # should only be able to unify if they're the same\n",
    "    solver = make_solver()\n",
    "    gtv = relay.GlobalTypeVar(\"gtv\")\n",
    "    unified = solver.Unify(gtv, gtv)\n",
    "    assert unified == gtv\n",
    "\n",
    "\n",
    "def test_unify_typecall():\n",
    "    solver = make_solver()\n",
    "    gtv = relay.GlobalTypeVar(\"gtv\")\n",
    "\n",
    "    # yeah, typecalls are shaped like tuples so the same\n",
    "    # tests work out\n",
    "    t1 = relay.ty.IncompleteType()\n",
    "    t2 = relay.ty.IncompleteType()\n",
    "    t3 = relay.ty.TensorType((10, 20), \"float32\")\n",
    "\n",
    "    tc1 = relay.ty.TypeCall(gtv, [t1, t2])\n",
    "    tc2 = relay.ty.TypeCall(gtv, [t3, t3])\n",
    "    unified = solver.Unify(tc1, tc2)\n",
    "    assert unified == tc2\n",
    "\n",
    "\n",
    "def test_unify_functype():\n",
    "    solver = make_solver()\n",
    "    t1 = relay.ty.IncompleteType()\n",
    "    t2 = relay.ty.IncompleteType()\n",
    "    t3 = relay.ty.IncompleteType()\n",
    "\n",
    "    unit = relay.ty.TupleType([])\n",
    "    tensor1 = relay.ty.TensorType((10, 20), \"float32\")\n",
    "    tensor2 = relay.ty.TensorType((10,), \"float32\")\n",
    "\n",
    "    ft1 = relay.ty.FuncType([t1, t2], t3)\n",
    "    ft2 = relay.ty.FuncType([tensor1, tensor2], unit)\n",
    "\n",
    "    unified = solver.Unify(ft1, ft2)\n",
    "    assert unified == ft2\n",
    "\n",
    "\n",
    "def test_recursive_unify():\n",
    "    solver = make_solver()\n",
    "    t1 = relay.ty.IncompleteType()\n",
    "    t2 = relay.ty.IncompleteType()\n",
    "    t3 = relay.ty.IncompleteType()\n",
    "\n",
    "    tensor1 = relay.ty.TensorType((10, 10, 20), \"float32\")\n",
    "    tensor2 = relay.ty.TensorType((10, 20), \"float32\")\n",
    "    tensor3 = relay.ty.TensorType((10,), \"float32\")\n",
    "\n",
    "    tup1 = relay.ty.TupleType([relay.ty.TupleType([t1, t2]), t2])\n",
    "    tup2 = relay.ty.TupleType([relay.ty.TupleType([tensor1, tensor2]), tensor2])\n",
    "\n",
    "    ft1 = relay.ty.FuncType([tup1, t3], t3)\n",
    "    ft2 = relay.ty.FuncType([tup2, tensor3], tensor3)\n",
    "\n",
    "    unified = solver.Unify(ft1, ft2)\n",
    "    assert unified == ft2\n",
    "\n",
    "\n",
    "def test_unify_vars_under_tuples():\n",
    "    solver = make_solver()\n",
    "    t1 = relay.ty.IncompleteType()\n",
    "\n",
    "    tup1 = relay.ty.TupleType([t1, t1])\n",
    "    unified = solver.Unify(tup1, tup1)\n",
    "    assert unified == tup1\n",
    "\n",
    "    t2 = relay.ty.IncompleteType()\n",
    "    tup2 = relay.ty.TupleType([t2, t2])\n",
    "\n",
    "    tup3 = relay.ty.TupleType([t1, t2])\n",
    "    tup4 = relay.ty.TupleType([t2, t1])\n",
    "    unified = solver.Unify(tup3, tup4)\n",
    "    assert unified == tup1 or unified == tup2\n",
    "\n",
    "\n",
    "def test_binding_over_typevars():\n",
    "    solver = make_solver()\n",
    "\n",
    "    t1 = relay.ty.IncompleteType()\n",
    "    t2 = relay.ty.IncompleteType()\n",
    "\n",
    "    a = relay.ty.TypeVar(\"a\")\n",
    "    b = relay.ty.TypeVar(\"b\")\n",
    "    c = relay.ty.TypeVar(\"c\")\n",
    "    d = relay.ty.TypeVar(\"d\")\n",
    "\n",
    "    ft1 = relay.ty.FuncType([t1], t2, [c, d])\n",
    "    ft2 = relay.ty.FuncType([a], b, [a, b])\n",
    "    unified = solver.Unify(ft1, ft2)\n",
    "    assert unified == solver.Resolve(ft1)\n",
    "\n",
    "\n",
    "def test_recursive_backward_solving():\n",
    "    solver = make_solver()\n",
    "\n",
    "    tensor1 = relay.ty.TensorType((10, 20), \"float32\")\n",
    "    tensor2 = relay.ty.TensorType((10, 1, 1), \"float32\")\n",
    "    tensor3 = relay.ty.TensorType((10,), \"float32\")\n",
    "\n",
    "    t1 = relay.ty.IncompleteType()\n",
    "    t2 = relay.ty.IncompleteType()\n",
    "    t3 = relay.ty.IncompleteType()\n",
    "\n",
    "    tup1 = relay.ty.TupleType([relay.ty.TupleType([tensor1, tensor2]), tensor3])\n",
    "    tup2 = relay.ty.TupleType([relay.ty.TupleType([t1, t2]), t3])\n",
    "    solver.gen_type(\"Identity\", [tup1], out=tup2)\n",
    "\n",
    "    assert solver.Solve()\n",
    "    assert solver.Resolve(tup2) == tup1\n",
    "\n",
    "\n",
    "def test_backward_solving_after_child_update():\n",
    "    solver = make_solver()\n",
    "\n",
    "    tensor1 = relay.ty.TensorType((10, 20), \"float32\")\n",
    "    tensor2 = relay.ty.TensorType((10, 1, 1), \"float32\")\n",
    "\n",
    "    t1 = relay.ty.IncompleteType()\n",
    "    t2 = relay.ty.IncompleteType()\n",
    "    t3 = relay.ty.IncompleteType()\n",
    "\n",
    "    tup1 = relay.ty.TupleType([t1, t2])\n",
    "    tup2 = relay.ty.TupleType([t1, t3])\n",
    "\n",
    "    tup_concrete = relay.ty.TupleType([tensor1, tensor2])\n",
    "\n",
    "    t4 = solver.gen_type(\"Identity\", [tup1])\n",
    "    t5 = solver.gen_type(\"Identity\", [tup2])\n",
    "\n",
    "    solver.gen_type(\"Identity\", [t4], out=t5)\n",
    "    assert solver.Solve()\n",
    "    assert solver.Resolve(t3) == t3 or solver.Resolve(t3) == t2\n",
    "    assert solver.Resolve(t4) == tup1 or solver.Resolve(t4) == tup2\n",
    "    assert solver.Resolve(t5) == tup1 or solver.Resolve(t5) == tup2\n",
    "\n",
    "    # updating the variables *inside* tup1 and tup2 should update t4 and t5\n",
    "    solver.gen_type(\"Identity\", [t1], out=tensor1)\n",
    "    solver.gen_type(\"Identity\", [t2], out=tensor2)\n",
    "    assert solver.Solve()\n",
    "    assert solver.Resolve(t4) == tup_concrete\n",
    "    assert solver.Resolve(t5) == tup_concrete\n",
    "\n",
    "\n",
    "def test_unify_quantified_funcs():\n",
    "    solver = make_solver()\n",
    "    a, b, c = relay.TypeVar(\"a\"), relay.TypeVar(\"b\"), relay.TypeVar(\"c\")\n",
    "    ft1 = relay.FuncType([a, b], c, [a, b, c])\n",
    "    ft2 = relay.FuncType([a, a], a, [a])\n",
    "    unified = solver.Unify(ft1, ft2)\n",
    "    assert unified == ft2\n",
    "\n",
    "    ft3 = relay.FuncType([a], a, [a])\n",
    "    ft4 = relay.FuncType([b], c, [b, c])\n",
    "    unified = solver.Unify(ft3, ft4)\n",
    "    assert unified == ft3\n",
    "\n",
    "\n",
    "def test_unify_quantified_func_and_concrete():\n",
    "    solver = make_solver()\n",
    "    a, b = relay.TypeVar(\"a\"), relay.TypeVar(\"b\")\n",
    "    ft1 = relay.FuncType([a], b, [a, b])\n",
    "    ft2 = relay.FuncType([b], relay.TupleType([]), [b])\n",
    "    unified = solver.Unify(ft1, ft2)\n",
    "    assert unified == ft2\n",
    "\n",
    "\n",
    "def test_unify_quantified_funcs_nesting():\n",
    "    solver = make_solver()\n",
    "    a, b, c = relay.TypeVar(\"a\"), relay.TypeVar(\"b\"), relay.TypeVar(\"c\")\n",
    "\n",
    "    ft1 = relay.FuncType([a, relay.TupleType([b, c])], relay.TupleType([a, b, c]), [a, b, c])\n",
    "    ft2 = relay.FuncType([a, relay.TupleType([a, a])], relay.TupleType([a, a, a]), [a])\n",
    "    unified = solver.Unify(ft1, ft2)\n",
    "    assert unified == ft2\n",
    "\n",
    "\n",
    "def test_unify_quantified_funcs_var_order():\n",
    "    solver = make_solver()\n",
    "    a, b, c = relay.TypeVar(\"a\"), relay.TypeVar(\"b\"), relay.TypeVar(\"c\")\n",
    "\n",
    "    ft1 = relay.FuncType([a, relay.TupleType([b, c])], relay.TupleType([a, b, c]), [a, b, c])\n",
    "    ft2 = relay.FuncType([a, relay.TupleType([a, c])], relay.TupleType([a, a, c]), [a, c])\n",
    "    # unified = solver.Unify(ft1, ft2) # crashes here but it shouldn't\n",
    "    # assert unified == ft2\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 不兼容的类型推断"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pytest\n",
    "\n",
    "@pytest.mark.xfail(raises=tvm._ffi.base.TVMError)\n",
    "def test_incompatible_tuple_unification():\n",
    "    solver = make_solver()\n",
    "    t1 = relay.ty.IncompleteType()\n",
    "    t2 = relay.ty.IncompleteType()\n",
    "\n",
    "    tensor1 = relay.ty.TensorType((1, 2, 3), \"float32\")\n",
    "    tensor2 = relay.ty.TensorType((2, 3), \"float32\")\n",
    "    tensor3 = relay.ty.TensorType((3,), \"float32\")\n",
    "\n",
    "    tup1 = relay.ty.TupleType([relay.ty.TupleType([t1, t1]), t2])\n",
    "    tup2 = relay.ty.TupleType([relay.ty.TupleType([tensor1, tensor2]), tensor3])\n",
    "    solver.Unify(tup1, tup2)\n",
    "\n",
    "\n",
    "@pytest.mark.xfail(raises=tvm._ffi.base.TVMError)\n",
    "def test_bad_recursive_unification():\n",
    "    solver = make_solver()\n",
    "    t1 = relay.ty.IncompleteType()\n",
    "    solver.Unify(t1, relay.ty.TupleType([t1, t1]))\n",
    "\n",
    "\n",
    "@pytest.mark.xfail(raises=tvm._ffi.base.TVMError)\n",
    "def test_unify_invalid_global_typevars():\n",
    "    solver = make_solver()\n",
    "    gtv1 = relay.GlobalTypeVar(\"gtv1\")\n",
    "    gtv2 = relay.GlobalTypeVar(\"gtv2\")\n",
    "    solver.Unify(gtv1, gtv2)\n",
    "\n",
    "\n",
    "@pytest.mark.xfail(raises=tvm._ffi.base.TVMError)\n",
    "def test_incompatible_typecall_var_unification():\n",
    "    solver = make_solver()\n",
    "    gtv1 = relay.GlobalTypeVar(\"gtv1\")\n",
    "    gtv2 = relay.GlobalTypeVar(\"gtv2\")\n",
    "\n",
    "    t1 = relay.IncompleteType()\n",
    "    t2 = relay.IncompleteType()\n",
    "\n",
    "    tc1 = relay.TypeCall(gtv1, [t1])\n",
    "    tc2 = relay.TypeCall(gtv2, [t2])\n",
    "    solver.Unify(tc1, tc2)\n",
    "\n",
    "\n",
    "@pytest.mark.xfail(raises=tvm._ffi.base.TVMError)\n",
    "def test_incompatible_typecall_args_unification():\n",
    "    solver = make_solver()\n",
    "    gtv = relay.GlobalTypeVar(\"gtv1\")\n",
    "    t1 = relay.IncompleteType()\n",
    "    t2 = relay.IncompleteType()\n",
    "\n",
    "    tensor1 = relay.TensorType((1, 2, 3), \"float32\")\n",
    "    tensor2 = relay.TensorType((2, 3), \"float32\")\n",
    "    tensor3 = relay.TensorType((3,), \"float32\")\n",
    "\n",
    "    tc1 = relay.TypeCall(gtv, [relay.TupleType([t1, t1]), t2])\n",
    "    tc2 = relay.TypeCall(gtv, [relay.TupleType([tensor1, tensor2]), tensor3])\n",
    "    solver.Unify(tc1, tc2)\n",
    "\n",
    "\n",
    "@pytest.mark.xfail(raises=tvm._ffi.base.TVMError)\n",
    "def test_incompatible_quantified_func_unification():\n",
    "    solver = make_solver()\n",
    "    a, b, c = relay.TypeVar(\"a\"), relay.TypeVar(\"b\"), relay.TypeVar(\"c\")\n",
    "\n",
    "    ft1 = relay.FuncType([a, b], c, [a, b, c])\n",
    "    ft2 = relay.FuncType([b, c], relay.TupleType([a]), [a, b, c])\n",
    "    solver.Unify(ft1, ft2)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 测试在布局转换过程中整数的兼容性"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "One or more operators have not been tuned. Please tune your model for better performance. Use DEBUG logging level to see more details.\n"
     ]
    }
   ],
   "source": [
    "x = relay.var(\"data\", shape=(2, 3, 48, 48), dtype=\"float32\")\n",
    "conv_out = relay.nn.conv2d(\n",
    "    x,\n",
    "    relay.var(\"weight\", shape=(1, 3, 1, 1), dtype=\"float32\"),\n",
    "    strides=[47, 47],\n",
    "    channels=1,\n",
    "    kernel_size=[1, 1],\n",
    ")\n",
    "bias_out = relay.nn.bias_add(conv_out, relay.var(\"bias\"))\n",
    "broadcast_out = relay.op.broadcast_to(bias_out, relay.const([2, 1, 2, 2], dtype=\"int64\"))\n",
    "y = relay.add(bias_out, broadcast_out)\n",
    "\n",
    "mod, _ = testing.create_workload(y)\n",
    "with tvm.transform.PassContext(opt_level=3):\n",
    "    with tvm.target.Target(\"llvm\"):\n",
    "        mod = relay.transform.CanonicalizeOps()(mod)\n",
    "        mod = relay.transform.AlterOpLayout()(mod)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "1. 首先定义了一个变量 `x`，它是一个形状为 (2, 3, 48, 48) 的浮点数张量。\n",
    "2. 然后使用 `relay.nn.conv2d` 函数进行卷积操作，输入是 `x`，权重的形状为 (1, 3, 1, 1)，步长为 [47, 47]，通道数为 1，卷积核大小为 [1, 1]。\n",
    "3. 接下来使用 `relay.nn.bias_add` 函数将偏置项添加到卷积输出上，得到 `bias_out`。\n",
    "4. 使用 `relay.op.broadcast_to` 函数将 `bias_out` 广播到形状为 [2, 1, 2, 2] 的张量，得到 `broadcast_out`。\n",
    "5. 最后，将 `bias_out` 和 `broadcast_out` 相加，得到结果 `y`。\n",
    "6. 创建一个工作负载 `mod`，其中包含计算图 `y`。\n",
    "7. 使用 TVM 的优化级别为 3 的上下文，将目标设置为 \"llvm\"。\n",
    "8. 对计算图 `mod` 进行规范化操作，然后进行布局转换操作。\n",
    "\n",
     "这段代码的目的是测试布局转换（`AlterOpLayout`）过程中整数类型的兼容性：`broadcast_to` 的目标形状以 `int64` 常量的形式给出，布局变换 pass 需要正确处理这些整数形状，确保整个编译流程不会引发类型错误或异常。"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "py312x",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
