{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "825b6c3c",
   "metadata": {},
   "source": [
    "# Caffe BatchNorm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "cc1ce122",
   "metadata": {
    "tags": [
     "remove-cell"
    ]
   },
   "outputs": [],
   "source": [
    "from pathlib import Path\n",
    "from tvm_book.config import env\n",
    "# Set up the caffe protobuf environment (points at the generated caffe_pb2 under tests/caffeproto)\n",
    "env.set_caffeproto(Path(env.__file__).parents[3]/\"tests/caffeproto\")\n",
    "# Set up the TVM environment\n",
    "# NOTE(review): hardcoded absolute path -- only resolvable on the original author's machine\n",
    "env.set_tvm(\"/media/pc/data/board/arria10/lxw/tasks/tvm-test\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "6b31d63f",
   "metadata": {},
   "outputs": [],
   "source": [
    "from pathlib import Path\n",
    "\n",
    "from google.protobuf import text_format\n",
    "import caffe_pb2 as pb2\n",
    "\n",
    "# Working directory for intermediate artifacts (prototxt/caffemodel dumps below)\n",
    "temp_dir = Path(\".temp\")\n",
    "temp_dir.mkdir(exist_ok=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "46218864",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Minimal Caffe net used to study BatchNorm: Input -> BatchNorm -> Scale.\n",
    "# (Caffe splits batch normalization into a BatchNorm layer for the statistics\n",
    "# and a Scale layer for gamma/beta.)\n",
    "text = \"\"\"\n",
    "layer {\n",
    "  name: \"data\"\n",
    "  type: \"Input\"\n",
    "  top: \"data\"\n",
    "  input_param {\n",
    "    shape {\n",
    "      dim: 1\n",
    "      dim: 3\n",
    "      dim: 10\n",
    "      dim: 10\n",
    "    }\n",
    "  }\n",
    "}\n",
    "layer {\n",
    "\tbottom: \"data\"\n",
    "\ttop: \"bn\"\n",
    "\tname: \"bn\"\n",
    "\ttype: \"BatchNorm\"\n",
    "\tbatch_norm_param {\n",
    "\t\tuse_global_stats: true\n",
    "\t}\n",
    "}\n",
    "layer {\n",
    "\tbottom: \"bn\"\n",
    "\ttop: \"bn\"\n",
    "\tname: \"scale\"\n",
    "\ttype: \"Scale\"\n",
    "\tscale_param {\n",
    "\t\tbias_term: true\n",
    "\t}\n",
    "}\n",
    "\"\"\"\n",
    "# Parse the prototxt text into a NetParameter protobuf message\n",
    "predict_net = text_format.Merge(text, pb2.NetParameter())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "a4aeb516",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'data': data}"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from tvm.relax.testing import nn\n",
    "from tvm.relax import op as _op\n",
    "exp_tab = {} # converted nodes, keyed by layer name\n",
    "dtype = \"float32\"\n",
    "# Handle Input layers first so later layers can look up their bottoms in exp_tab\n",
    "for pl in predict_net.layer:\n",
    "    name = pl.name\n",
    "    if pl.type == \"Input\":\n",
    "        shape = pl.input_param.shape\n",
    "        assert len(shape)==1 and len(pl.top)==1, \"Input 类型仅仅支持单输入单输出\"\n",
    "        shape = list(shape[0].dim)\n",
    "        exp_tab[name] = nn.Placeholder(shape, dtype, name)\n",
    "for pl in predict_net.layer:\n",
    "    name = pl.name\n",
    "    if pl.type == \"BatchNorm\":\n",
    "        assert len(pl.bottom) == 1\n",
    "        inp = exp_tab[pl.bottom[0]]\n",
    "        # Read NCHW dims from the input's struct info (assumes rank-4 input -- see the Input shape above)\n",
    "        n, c, h, w = [int(sh) for sh in inp.struct_info.shape]\n",
    "        # Exploratory: stop at the first BatchNorm; `pl` is inspected in later cells.\n",
    "        break\n",
    "        # exp_tab[name] = _op.concat(inputs, axis=pl.concat_param.axis)\n",
    "exp_tab"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "ef864cb5",
   "metadata": {},
   "outputs": [],
   "source": [
    "from tvm_book.frontend.caffe import check_unsupported_ops\n",
    "# Caffe layer types the converter handles; check_unsupported_ops presumably\n",
    "# flags any layer in predict_net whose type is outside this list.\n",
    "supported_op_names = [\n",
    "    \"BatchNorm\",\n",
    "    \"Concat\",\n",
    "    \"Convolution\",\n",
    "    \"Crop\",\n",
    "    \"Deconvolution\",\n",
    "    \"Dropout\",\n",
    "    \"Eltwise\",\n",
    "    \"Embed\",\n",
    "    \"Flatten\",\n",
    "    \"InnerProduct\",\n",
    "    \"Input\",\n",
    "    \"LRN\",\n",
    "    \"Permute\",\n",
    "    \"Pooling\",\n",
    "    \"Power\",\n",
    "    \"PReLU\",\n",
    "    \"ReLU\",\n",
    "    \"Reshape\",\n",
    "    \"Scale\",\n",
    "    \"Sigmoid\",\n",
    "    \"Slice\",\n",
    "    \"Softmax\",\n",
    "    \"TanH\",\n",
    "    \"Upsample\",\n",
    "    \"Reduction\",\n",
    "]\n",
    "check_unsupported_ops(predict_net.layer, supported_op_names)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "5ce295df",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# Pretrained ResNet-50 weights (binary caffemodel).\n",
    "# NOTE(review): hardcoded absolute path -- only works on the original machine.\n",
    "blob_file = \"/media/pc/data/board/arria10/lxw/tasks/tools/npuusertools/models/caffe/resnet50/ResNet-50-model.caffemodel\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "292433b1",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(pl.blobs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "a9778664",
   "metadata": {},
   "outputs": [],
   "source": [
    "from caffe_fuse import fuse_network, get_bn_params\n",
    "from caffe_utils import unity_struct\n",
    "# NOTE(review): hardcoded absolute paths -- only resolvable on the original machine\n",
    "proto_file = \"/media/pc/data/board/arria10/lxw/tasks/tools/npuusertools/models/caffe/resnet50/ResNet-50-deploy.prototxt\"\n",
    "blob_file = \"/media/pc/data/board/arria10/lxw/tasks/tools/npuusertools/models/caffe/resnet50/ResNet-50-model.caffemodel\"\n",
    "# Load the network definition (prototxt) and trained parameters (caffemodel)\n",
    "init_net = pb2.NetParameter()\n",
    "predict_net = pb2.NetParameter()\n",
    "with open(proto_file, 'r') as f:\n",
    "    text_format.Merge(f.read(), predict_net)\n",
    "with open(blob_file, 'rb') as fp:\n",
    "    init_net.ParseFromString(fp.read())\n",
    "# Normalize the structure, then fuse the network (see caffe_utils / caffe_fuse)\n",
    "predict_net = unity_struct(predict_net)\n",
    "init_net, predict_net = fuse_network(init_net, predict_net)\n",
    "with open(temp_dir/\"test.prototxt\", \"w\") as fp: # save the fused network structure\n",
    "    fp.write(text_format.MessageToString(predict_net))\n",
    "with open(temp_dir/\"test.caffemodel\", \"wb\") as fp: # save the fused network weights\n",
    "    fp.write(init_net.SerializeToString())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "ada98d54",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Caffe models store parameters in either the new `layer` field or the legacy\n",
    "# `layers` field; determine which is populated BEFORE indexing into it\n",
    "# (the original indexed init_net.layer[1] unconditionally, which fails for\n",
    "# legacy models whose `layer` field is empty).\n",
    "use_layer_field = bool(init_net.layer)\n",
    "init_layers = init_net.layer if use_layer_field else init_net.layers\n",
    "init_layer_dict = {il.name: il for il in init_layers}\n",
    "# Index 1 picked by inspection -- presumably a BatchNorm layer of the fused ResNet-50.\n",
    "bn_layer = init_layers[1]\n",
    "bn_params = get_bn_params(init_layer_dict, bn_layer)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 80,
   "id": "0d31a347",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[31mSignature:\u001b[39m\n",
      "_op.nn.batch_norm(\n",
      "    data: tvm.ir.expr.RelaxExpr,\n",
      "    gamma: tvm.ir.expr.RelaxExpr,\n",
      "    beta: tvm.ir.expr.RelaxExpr,\n",
      "    moving_mean: tvm.ir.expr.RelaxExpr,\n",
      "    moving_var: tvm.ir.expr.RelaxExpr,\n",
      "    axis: int,\n",
      "    epsilon: float = \u001b[32m1e-05\u001b[39m,\n",
      "    center: bool = \u001b[38;5;28;01mTrue\u001b[39;00m,\n",
      "    scale: bool = \u001b[38;5;28;01mTrue\u001b[39;00m,\n",
      "    momentum: float = \u001b[32m0.1\u001b[39m,\n",
      "    training: bool = \u001b[38;5;28;01mTrue\u001b[39;00m,\n",
      ") -> tvm.ir.expr.RelaxExpr\n",
      "\u001b[31mDocstring:\u001b[39m\n",
      "Batch normalization layer (Ioffe and Szegedy, 2014).\n",
      "\n",
      "Normalizes the input at each batch, i.e. applies a transformation\n",
      "that maintains the mean activation close to 0 and the activation\n",
      "standard deviation close to 1.\n",
      "\n",
      ".. math::\n",
      "\n",
      "    data\\_mean[i] = mean(data[:,i,:,...]) \\\\\n",
      "    data\\_var[i] = var(data[:,i,:,...])\n",
      "\n",
      "Both *mean* and *var* returns a scalar by treating the input as a vector.\n",
      "\n",
      "Then compute the normalized output, which has the same shape as input, as following:\n",
      "\n",
      ".. math::\n",
      "\n",
      "    out[:,i,:,...] = \\frac{data[:,i,:,...] - data\\_mean[i]}{\\sqrt{data\\_var[i]+\\epsilon}}\n",
      "        * gamma[i] + beta[i]\n",
      "\n",
      "Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta``\n",
      "have shape *(k,)*.\n",
      "\n",
      "Besides the inputs and the outputs, this operator accepts two auxiliary\n",
      "states, ``moving_mean`` and ``moving_var``, which are *k*-length\n",
      "vectors. They are global statistics for the whole dataset, which are updated by\n",
      "\n",
      ".. code:: python\n",
      "\n",
      "    moving_mean = moving_mean * momentum + data_mean * (1 - momentum)\n",
      "    moving_var = moving_var * momentum + data_var * (1 - momentum)\n",
      "\n",
      "The parameter ``axis`` specifies which axis of the input shape denotes\n",
      "the 'channel' (separately normalized groups).  The default is 1.\n",
      "Specifying -1 sets the channel axis to be the last item in the input shape.\n",
      "\n",
      ".. note::\n",
      "\n",
      "    This operator has two modes:\n",
      "\n",
      "    - Training mode.\n",
      "        - Use the mean and var computed from THIS batch to normalize.\n",
      "        - Update and then return the running mean and running var.\n",
      "\n",
      "    - Inference mode.\n",
      "        - Use the running_mean and running_var parameters to normalize.\n",
      "        - Do not update the running mean and running var. Just return the original value.\n",
      "\n",
      "    In the legalization stage, this operator will be legalized to the training mode by default.\n",
      "\n",
      "    You can use tvm.relax.transform.DecomposeOpsForInference to decompose the operator, so it\n",
      "    executes the inference mode computation. Similarly, use\n",
      "    tvm.relax.transform.DecomposeOpsForTraining to execute the training mode computation.\n",
      "\n",
      "Parameters\n",
      "----------\n",
      "data : relax.Expr\n",
      "    The input data to the operator.\n",
      "\n",
      "gamma : relax.Expr\n",
      "    The gamma scale factor.\n",
      "\n",
      "beta : relax.Expr\n",
      "    The beta offset factor.\n",
      "\n",
      "moving_mean : relax.Expr\n",
      "    Running mean of input.\n",
      "\n",
      "moving_var : relax.Expr\n",
      "    Running variance of input.\n",
      "\n",
      "axis : int\n",
      "    The axis along which the normalization is applied.\n",
      "\n",
      "epsilon : float\n",
      "    Small float added to variance to avoid dividing by zero.\n",
      "\n",
      "center : bool\n",
      "    Indicating if the beta offset will be added to the normalized tensor.\n",
      "\n",
      "scale : bool\n",
      "    Indicating if the gamma scale will be multiplied.\n",
      "\n",
      "momentum : float\n",
      "    The value used for the moving_mean and moving_var update.\n",
      "\n",
      "training : bool\n",
      "    A boolean value to indicate whether training or in eval mode. By default.\n",
      "      relax batch_norm is training mode. To transform it to inference mode,\n",
      "      can use DecomposeOpsForInference.\n",
      "\n",
      "Returns\n",
      "-------\n",
      "result : relax.Expr\n",
      "    The computed result.\n",
      "\u001b[31mFile:\u001b[39m      /media/pc/data/lxw/ai/tvm/python/tvm/relax/op/nn/nn.py\n",
      "\u001b[31mType:\u001b[39m      function"
     ]
    }
   ],
   "source": [
    "_op.nn.batch_norm?"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9e560d91",
   "metadata": {},
   "outputs": [
    {
     "ename": "AttributeError",
     "evalue": "module 'tvm.relax.op' has no attribute 'const'",
     "output_type": "error",
     "traceback": [
      "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
      "\u001b[31mAttributeError\u001b[39m                            Traceback (most recent call last)",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[91]\u001b[39m\u001b[32m, line 1\u001b[39m\n\u001b[32m----> \u001b[39m\u001b[32m1\u001b[39m \u001b[43m_op\u001b[49m\u001b[43m.\u001b[49m\u001b[43mconst\u001b[49m\n",
      "\u001b[31mAttributeError\u001b[39m: module 'tvm.relax.op' has no attribute 'const'"
     ]
    }
   ],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 90,
   "id": "6107fee7",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "3"
      ]
     },
     "execution_count": 90,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(bn_layer.blobs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c4ba07eb",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "py313",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.13.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
