{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2.0.0\n"
     ]
    }
   ],
   "source": [
    "import tensorflow as tf\n",
    "import numpy as np\n",
    "import time\n",
    "\n",
    "print(tf.__version__)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {},
   "outputs": [],
   "source": [
    "import traceback\n",
    "import contextlib\n",
    "\n",
    "\n",
    "# Context manager asserting that the wrapped block raises `error_class`:\n",
    "# - expected error: print a short traceback and continue\n",
    "# - unexpected error: re-raise with the original traceback intact\n",
    "# - no error at all: fail loudly\n",
    "@contextlib.contextmanager\n",
    "def assert_raises(error_class):\n",
    "    try:\n",
    "        yield\n",
    "    except error_class:\n",
    "        print('Caught expected exception \\n  {}:'.format(error_class))\n",
    "        traceback.print_exc(limit=2)\n",
    "    except Exception:\n",
    "        # Bare `raise` preserves the original traceback (unlike `raise e`).\n",
    "        raise\n",
    "    else:\n",
    "        raise Exception(\n",
    "            'Expected {} to be raised but no error was raised!'.format(\n",
    "                error_class))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "`tf.function` 模块，结合 `AutoGraph` 机制，通过 `@tf.function` 修饰符将模型转换成图模式执行\n",
    "> - 被其修饰的函数内，可使用的语句有一定的限制，且操作本身需要能够被构建为计算图\n",
    "> - 在函数内只使用 `TensorFlow` 的原生操作，不要使用过于复杂的 `Python` 语句，函数参数只包括 `TensorFlow` 张量或 `NumPy` 数组"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 使用示例"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "############### Simple model #####################\n",
    "class CNN(tf.keras.Model):\n",
    "    \"\"\"Two conv/pool stages followed by two dense layers; softmax output over 10 classes.\n",
    "\n",
    "    Expects inputs shaped [batch_size, 28, 28, 1] (MNIST images with a channel axis).\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        self.conv1 = tf.keras.layers.Conv2D(\n",
    "            filters=32,             # number of convolution kernels\n",
    "            kernel_size=[5, 5],     # receptive field size\n",
    "            padding='same',         # padding strategy ('valid' or 'same')\n",
    "            activation=tf.nn.relu,  # activation function\n",
    "        )\n",
    "        self.pool1 = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2)\n",
    "        self.conv2 = tf.keras.layers.Conv2D(\n",
    "            filters=64,\n",
    "            kernel_size=[5, 5],\n",
    "            padding='same',\n",
    "            activation=tf.nn.relu,\n",
    "        )\n",
    "        self.pool2 = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2)\n",
    "        self.flatten = tf.keras.layers.Reshape(target_shape=(7 * 7 * 64,))\n",
    "        self.dense1 = tf.keras.layers.Dense(units=1024, activation=tf.nn.relu)\n",
    "        self.dense2 = tf.keras.layers.Dense(units=10)\n",
    "\n",
    "    def call(self, inputs):\n",
    "        x = self.conv1(inputs)                  # [batch_size, 28, 28, 32]\n",
    "        x = self.pool1(x)                       # [batch_size, 14, 14, 32]\n",
    "        x = self.conv2(x)                       # [batch_size, 14, 14, 64]\n",
    "        x = self.pool2(x)                       # [batch_size, 7, 7, 64]\n",
    "        x = self.flatten(x)                     # [batch_size, 7 * 7 * 64]\n",
    "        x = self.dense1(x)                      # [batch_size, 1024]\n",
    "        x = self.dense2(x)                      # [batch_size, 10]\n",
    "        output = tf.nn.softmax(x)\n",
    "        return output\n",
    "\n",
    "\n",
    "############## Data pipeline #############################\n",
    "class MNISTLoader():\n",
    "    \"\"\"Loads MNIST and serves random training batches.\"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        mnist = tf.keras.datasets.mnist\n",
    "        (self.train_data, self.train_label), (self.test_data, self.test_label) = mnist.load_data()\n",
    "        # MNIST images are uint8 in [0, 255]; scale to [0, 1] floats and append a channel dimension.\n",
    "        self.train_data = np.expand_dims(self.train_data.astype(np.float32) / 255.0, axis=-1)      # [60000, 28, 28, 1]\n",
    "        self.test_data = np.expand_dims(self.test_data.astype(np.float32) / 255.0, axis=-1)        # [10000, 28, 28, 1]\n",
    "        self.train_label = self.train_label.astype(np.int32)    # [60000]\n",
    "        self.test_label = self.test_label.astype(np.int32)      # [10000]\n",
    "        self.num_train_data, self.num_test_data = self.train_data.shape[0], self.test_data.shape[0]\n",
    "\n",
    "    def get_batch(self, batch_size):\n",
    "        # Draw `batch_size` random training examples (sampling with replacement).\n",
    "        index = np.random.randint(0, np.shape(self.train_data)[0], batch_size)\n",
    "        return self.train_data[index, :], self.train_label[index]\n",
    "\n",
    "\n",
    "############## Model / training setup ###################################\n",
    "num_batches = 1000\n",
    "batch_size = 50\n",
    "learning_rate = 0.001\n",
    "data_loader = MNISTLoader()\n",
    "model = CNN()\n",
    "# `lr` is a deprecated alias in tf.keras optimizers; use `learning_rate`.\n",
    "optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loss:  2.30859\n",
      "loss:  2.2340374\n",
      "loss:  2.11493564\n",
      "loss:  1.93809342\n",
      "loss:  1.83007169\n",
      "loss:  1.69274735\n",
      "loss:  1.59957325\n",
      "loss:  1.21699929\n",
      "loss:  1.15978587\n",
      "loss:  0.932443619\n",
      "loss:  0.872278512\n",
      "loss:  0.679867625\n",
      "loss:  0.516669095\n",
      "loss:  0.758668065\n",
      "loss:  0.751002252\n",
      "loss:  0.664085925\n",
      "loss:  0.652081609\n",
      "loss:  0.821466625\n",
      "loss:  0.773144126\n",
      "loss:  0.57678324\n",
      "loss:  0.268536806\n",
      "loss:  0.53191483\n",
      "loss:  0.456898868\n",
      "loss:  0.415572137\n",
      "loss:  0.239215359\n",
      "loss:  0.53759706\n",
      "loss:  0.439769208\n",
      "loss:  0.266360849\n",
      "loss:  0.601416\n",
      "loss:  0.343753695\n",
      "loss:  0.201910421\n",
      "loss:  0.312974066\n",
      "loss:  0.306883037\n",
      "loss:  0.363924\n",
      "loss:  0.200239\n",
      "loss:  0.360672683\n",
      "loss:  0.238687128\n",
      "loss:  0.236905187\n",
      "loss:  0.345142633\n",
      "loss:  0.138407245\n",
      "loss:  0.42357564\n",
      "loss:  0.132109851\n",
      "loss:  0.219590038\n",
      "loss:  0.490732968\n",
      "loss:  0.278162152\n",
      "loss:  0.15120703\n",
      "loss:  0.0375153571\n",
      "loss:  0.20297794\n",
      "loss:  0.338969618\n",
      "loss:  0.150947526\n",
      "loss:  0.17179352\n",
      "loss:  0.223425925\n",
      "loss:  0.196491361\n",
      "loss:  0.285376579\n",
      "loss:  0.284154058\n",
      "loss:  0.219747931\n",
      "loss:  0.282542109\n",
      "loss:  0.412851721\n",
      "loss:  0.223076865\n",
      "loss:  0.198086\n",
      "loss:  0.114633091\n",
      "loss:  0.274324507\n",
      "loss:  0.120537311\n",
      "loss:  0.153130427\n",
      "loss:  0.375193447\n",
      "loss:  0.140126988\n",
      "loss:  0.32173112\n",
      "loss:  0.194057554\n",
      "loss:  0.138446823\n",
      "loss:  0.0915667787\n",
      "loss:  0.141352415\n",
      "loss:  0.373406559\n",
      "loss:  0.204777718\n",
      "loss:  0.200632095\n",
      "loss:  0.16556713\n",
      "loss:  0.113795578\n",
      "loss:  0.142855927\n",
      "loss:  0.0149778463\n",
      "loss:  0.363672018\n",
      "loss:  0.17592375\n",
      "loss:  0.164822623\n",
      "loss:  0.138820827\n",
      "loss:  0.230453447\n",
      "loss:  0.261440039\n",
      "loss:  0.247651383\n",
      "loss:  0.15254423\n",
      "loss:  0.04524827\n",
      "loss:  0.0654945\n",
      "loss:  0.0688281\n",
      "loss:  0.32408011\n",
      "loss:  0.0498735346\n",
      "loss:  0.0458036959\n",
      "loss:  0.0664545\n",
      "loss:  0.0995440111\n",
      "loss:  0.0584666543\n",
      "loss:  0.0446853861\n",
      "loss:  0.251780748\n",
      "loss:  0.187704712\n",
      "loss:  0.0449002348\n",
      "loss:  0.50923568\n",
      "loss:  0.103679731\n",
      "loss:  0.0822034851\n",
      "loss:  0.0839962587\n",
      "loss:  0.128038794\n",
      "loss:  0.111489564\n",
      "loss:  0.189143136\n",
      "loss:  0.129052162\n",
      "loss:  0.0703390613\n",
      "loss:  0.244499609\n",
      "loss:  0.200844899\n",
      "loss:  0.2632806\n",
      "loss:  0.215003401\n",
      "loss:  0.171272263\n",
      "loss:  0.0940127149\n",
      "loss:  0.194027409\n",
      "loss:  0.112828419\n",
      "loss:  0.369408786\n",
      "loss:  0.0985908061\n",
      "loss:  0.158525154\n",
      "loss:  0.100971937\n",
      "loss:  0.114109889\n",
      "loss:  0.0364543125\n",
      "loss:  0.125343889\n",
      "loss:  0.132001638\n",
      "loss:  0.066182144\n",
      "loss:  0.390985459\n",
      "loss:  0.31405443\n",
      "loss:  0.120724127\n",
      "loss:  0.196814731\n",
      "loss:  0.221040472\n",
      "loss:  0.119004056\n",
      "loss:  0.11673218\n",
      "loss:  0.0734111\n",
      "loss:  0.0436482802\n",
      "loss:  0.122932777\n",
      "loss:  0.178303704\n",
      "loss:  0.0903805345\n",
      "loss:  0.206961453\n",
      "loss:  0.224700034\n",
      "loss:  0.128220275\n",
      "loss:  0.0507869869\n",
      "loss:  0.0690311641\n",
      "loss:  0.0621284395\n",
      "loss:  0.177625507\n",
      "loss:  0.259410679\n",
      "loss:  0.112299532\n",
      "loss:  0.0954270065\n",
      "loss:  0.19073458\n",
      "loss:  0.0260900259\n",
      "loss:  0.121577799\n",
      "loss:  0.140008122\n",
      "loss:  0.138771176\n",
      "loss:  0.101905994\n",
      "loss:  0.41975522\n",
      "loss:  0.26793161\n",
      "loss:  0.0922055468\n",
      "loss:  0.208694369\n",
      "loss:  0.0568014421\n",
      "loss:  0.0788864493\n",
      "loss:  0.125595659\n",
      "loss:  0.133139089\n",
      "loss:  0.144907594\n",
      "loss:  0.0548937842\n",
      "loss:  0.0470337644\n",
      "loss:  0.181933969\n",
      "loss:  0.100555547\n",
      "loss:  0.0853213519\n",
      "loss:  0.057308957\n",
      "loss:  0.0592516139\n",
      "loss:  0.0446165688\n",
      "loss:  0.0264963098\n",
      "loss:  0.19939895\n",
      "loss:  0.0420911834\n",
      "loss:  0.2853719\n",
      "loss:  0.258644283\n",
      "loss:  0.0216030385\n",
      "loss:  0.104816809\n",
      "loss:  0.0977677628\n",
      "loss:  0.170920521\n",
      "loss:  0.305819243\n",
      "loss:  0.189065263\n",
      "loss:  0.270813406\n",
      "loss:  0.0323247612\n",
      "loss:  0.109686464\n",
      "loss:  0.0408971123\n",
      "loss:  0.148140639\n",
      "loss:  0.0487185232\n",
      "loss:  0.143682837\n",
      "loss:  0.0995319784\n",
      "loss:  0.0623166524\n",
      "loss:  0.083261162\n",
      "loss:  0.105056651\n",
      "loss:  0.228345782\n",
      "loss:  0.171323553\n",
      "loss:  0.117101818\n",
      "loss:  0.0345931388\n",
      "loss:  0.104757041\n",
      "loss:  0.199277654\n",
      "loss:  0.0977140442\n",
      "loss:  0.0261868183\n",
      "loss:  0.0924001783\n",
      "loss:  0.0762027204\n",
      "loss:  0.286849\n",
      "loss:  0.136922747\n",
      "loss:  0.0678502768\n",
      "loss:  0.0403731689\n",
      "loss:  0.0119871907\n",
      "loss:  0.0866033137\n",
      "loss:  0.0603222214\n",
      "loss:  0.194274127\n",
      "loss:  0.0786319077\n",
      "loss:  0.255074233\n",
      "loss:  0.0592418\n",
      "loss:  0.00742490217\n",
      "loss:  0.100490287\n",
      "loss:  0.0527605191\n",
      "loss:  0.165410534\n",
      "loss:  0.0892823637\n",
      "loss:  0.0654518157\n",
      "loss:  0.199929014\n",
      "loss:  0.0836292\n",
      "loss:  0.171044827\n",
      "loss:  0.0207655337\n",
      "loss:  0.0765956193\n",
      "loss:  0.14533934\n",
      "loss:  0.184256166\n",
      "loss:  0.265268773\n",
      "loss:  0.0573938973\n",
      "loss:  0.31122157\n",
      "loss:  0.0529795028\n",
      "loss:  0.0862265527\n",
      "loss:  0.142873362\n",
      "loss:  0.291384488\n",
      "loss:  0.206990302\n",
      "loss:  0.229765296\n",
      "loss:  0.159757167\n",
      "loss:  0.0703263357\n",
      "loss:  0.100359857\n",
      "loss:  0.0835954919\n",
      "loss:  0.0618771501\n",
      "loss:  0.0597552806\n",
      "loss:  0.0798355937\n",
      "loss:  0.0504310653\n",
      "loss:  0.0721972659\n",
      "loss:  0.0831670463\n",
      "loss:  0.0989117548\n",
      "loss:  0.045578856\n",
      "loss:  0.114916913\n",
      "loss:  0.193584725\n",
      "loss:  0.0530747622\n",
      "loss:  0.0291885287\n",
      "loss:  0.0332569666\n",
      "loss:  0.026792543\n",
      "loss:  0.0414272882\n",
      "loss:  0.128578275\n",
      "loss:  0.111425117\n",
      "loss:  0.157653078\n",
      "loss:  0.0436532684\n",
      "loss:  0.0762517303\n",
      "loss:  0.0881111845\n",
      "loss:  0.101179101\n",
      "loss:  0.0372624882\n",
      "loss:  0.0657837614\n",
      "loss:  0.0722577572\n",
      "loss:  0.104001895\n",
      "loss:  0.0579458661\n",
      "loss:  0.138401121\n",
      "loss:  0.0963198394\n",
      "loss:  0.0285489671\n",
      "loss:  0.0685210675\n",
      "loss:  0.155711442\n",
      "loss:  0.1760066\n",
      "loss:  0.0470751747\n",
      "loss:  0.0712967\n",
      "loss:  0.1386296\n",
      "loss:  0.0206904728\n",
      "loss:  0.0628308505\n",
      "loss:  0.248465449\n",
      "loss:  0.110264234\n",
      "loss:  0.0396882258\n",
      "loss:  0.233224735\n",
      "loss:  0.10828723\n",
      "loss:  0.0948562399\n",
      "loss:  0.0421241298\n",
      "loss:  0.121537797\n",
      "loss:  0.227227286\n",
      "loss:  0.0763012618\n",
      "loss:  0.0186965223\n",
      "loss:  0.0456245765\n",
      "loss:  0.0295157321\n",
      "loss:  0.0230087955\n",
      "loss:  0.146641552\n",
      "loss:  0.377396852\n",
      "loss:  0.192527518\n",
      "loss:  0.152411968\n",
      "loss:  0.216482282\n",
      "loss:  0.0794636756\n",
      "loss:  0.074413687\n",
      "loss:  0.0456183255\n",
      "loss:  0.0151654305\n",
      "loss:  0.0366318524\n",
      "loss:  0.0687883198\n",
      "loss:  0.0247855373\n",
      "loss:  0.058974579\n",
      "loss:  0.00477450201\n",
      "loss:  0.117799111\n",
      "loss:  0.209810108\n",
      "loss:  0.0888371542\n",
      "loss:  0.015437006\n",
      "loss:  0.0126528265\n",
      "loss:  0.0164975245\n",
      "loss:  0.0830525085\n",
      "loss:  0.0208741147\n",
      "loss:  0.0270178858\n",
      "loss:  0.191458091\n",
      "loss:  0.0324870795\n",
      "loss:  0.172114983\n",
      "loss:  0.143616766\n",
      "loss:  0.0143773425\n",
      "loss:  0.243405968\n",
      "loss:  0.0167587921\n",
      "loss:  0.0671056658\n",
      "loss:  0.132745907\n",
      "loss:  0.0232671387\n",
      "loss:  0.0960794464\n",
      "loss:  0.0742816\n",
      "loss:  0.11306309\n",
      "loss:  0.0812016204\n",
      "loss:  0.0744640604\n",
      "loss:  0.095839709\n",
      "loss:  0.0386919528\n",
      "loss:  0.104055397\n",
      "loss:  0.0643415079\n",
      "loss:  0.0104489448\n",
      "loss:  0.145027116\n",
      "loss:  0.0222265869\n",
      "loss:  0.0338858142\n",
      "loss:  0.00772570865\n",
      "loss:  0.021041248\n",
      "loss:  0.105168134\n",
      "loss:  0.163134754\n",
      "loss:  0.0884149373\n",
      "loss:  0.0842412114\n",
      "loss:  0.0962962508\n",
      "loss:  0.0943272412\n",
      "loss:  0.0138635468\n",
      "loss:  0.122295685\n",
      "loss:  0.0161208194\n",
      "loss:  0.00807893556\n",
      "loss:  0.273581684\n",
      "loss:  0.0743485093\n",
      "loss:  0.0299173854\n",
      "loss:  0.103000641\n",
      "loss:  0.0940943807\n",
      "loss:  0.124708585\n",
      "loss:  0.138123021\n",
      "loss:  0.107257336\n",
      "loss:  0.157155216\n",
      "loss:  0.0692042038\n",
      "loss:  0.0430981293\n",
      "loss:  0.0256890375\n",
      "loss:  0.0439669266\n",
      "loss:  0.0979744941\n",
      "loss:  0.0544650331\n",
      "loss:  0.192036167\n",
      "loss:  0.0561071187\n",
      "loss:  0.0924589634\n",
      "loss:  0.0554057173\n",
      "loss:  0.063432157\n",
      "loss:  0.0574876294\n",
      "loss:  0.0615500398\n",
      "loss:  0.0330045782\n",
      "loss:  0.0434721857\n",
      "loss:  0.192795888\n",
      "loss:  0.133145556\n",
      "loss:  0.00542332605\n",
      "loss:  0.0537373275\n",
      "loss:  0.0598851256\n",
      "loss:  0.0413058326\n",
      "loss:  0.0233153291\n",
      "loss:  0.134992078\n",
      "loss:  0.123230658\n",
      "loss:  0.0483474359\n",
      "loss:  0.0728492588\n",
      "loss:  0.0447693579\n",
      "loss:  0.0784772336\n",
      "loss:  0.118578471\n",
      "loss:  0.0421760604\n",
      "loss:  0.0458749421\n",
      "loss:  0.0948572606\n",
      "loss:  0.0367174633\n",
      "loss:  0.0479582027\n",
      "loss:  0.131918177\n",
      "loss:  0.083275184\n",
      "loss:  0.0126065072\n",
      "loss:  0.0407466255\n",
      "loss:  0.101187021\n",
      "loss:  0.088572681\n",
      "loss:  0.303579211\n",
      "loss:  0.027947126\n",
      "loss:  0.0262789037\n",
      "loss:  0.132738382\n",
      "loss:  0.0182244089\n",
      "loss:  0.0225388724\n",
      "loss:  0.0265959986\n",
      "loss:  0.147833601\n",
      "loss:  0.0413602814\n",
      "loss:  0.0502013\n",
      "loss:  0.0542664863\n",
      "loss:  0.117509216\n",
      "loss:  0.0814109743\n",
      "loss:  0.0119358981\n",
      "loss:  0.0971599519\n",
      "loss:  0.0383117236\n",
      "loss:  0.00688504381\n",
      "loss:  0.106420308\n",
      "loss:  0.0290321168\n",
      "loss:  0.0105251325\n",
      "loss:  0.0375247486\n",
      "loss:  0.0105559966\n",
      "loss:  0.142709047\n",
      "loss:  0.061826922\n",
      "loss:  0.0246834848\n",
      "loss:  0.0380401127\n",
      "loss:  0.0914378092\n",
      "loss:  0.0728576854\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loss:  0.0983503833\n",
      "loss:  0.0321525298\n",
      "loss:  0.0576387\n",
      "loss:  0.0706933\n",
      "loss:  0.146571547\n",
      "loss:  0.0688927844\n",
      "loss:  0.277490675\n",
      "loss:  0.0136702899\n",
      "loss:  0.0127472868\n",
      "loss:  0.0142394518\n",
      "loss:  0.0936196446\n",
      "loss:  0.0104037765\n",
      "loss:  0.0436459146\n",
      "loss:  0.0147388466\n",
      "loss:  0.077041775\n",
      "loss:  0.058235947\n",
      "loss:  0.0146927489\n",
      "loss:  0.0302243158\n",
      "loss:  0.114086412\n",
      "loss:  0.0888605416\n",
      "loss:  0.0392818637\n",
      "loss:  0.0522052273\n",
      "loss:  0.0338935107\n",
      "loss:  0.0436260477\n",
      "loss:  0.139165312\n",
      "loss:  0.0417692177\n",
      "loss:  0.00542151788\n",
      "loss:  0.00965692382\n",
      "loss:  0.224222168\n",
      "loss:  0.250543982\n",
      "loss:  0.0408216082\n",
      "loss:  0.0615965463\n",
      "loss:  0.0886703879\n",
      "loss:  0.0464459807\n",
      "loss:  0.119751126\n",
      "loss:  0.0518451594\n",
      "loss:  0.0195443276\n",
      "loss:  0.0350716673\n",
      "loss:  0.105336994\n",
      "loss:  0.0105956923\n",
      "loss:  0.01623776\n",
      "loss:  0.0366701782\n",
      "loss:  0.0577814728\n",
      "loss:  0.0528233238\n",
      "loss:  0.0225539543\n",
      "loss:  0.0375950933\n",
      "loss:  0.0276213437\n",
      "loss:  0.0357109942\n",
      "loss:  0.0221369602\n",
      "loss:  0.012641822\n",
      "loss:  0.0510088056\n",
      "loss:  0.0205044653\n",
      "loss:  0.0761286095\n",
      "loss:  0.0806904435\n",
      "loss:  0.0240366701\n",
      "loss:  0.0892662406\n",
      "loss:  0.051539585\n",
      "loss:  0.100919992\n",
      "loss:  0.0672811866\n",
      "loss:  0.187625736\n",
      "loss:  0.122791216\n",
      "loss:  0.00807996932\n",
      "loss:  0.0945661962\n",
      "loss:  0.107588351\n",
      "loss:  0.158946157\n",
      "loss:  0.0215214249\n",
      "loss:  0.209588751\n",
      "loss:  0.0582827292\n",
      "loss:  0.018287845\n",
      "loss:  0.152965456\n",
      "loss:  0.0353967845\n",
      "loss:  0.0571354628\n",
      "loss:  0.0336846225\n",
      "loss:  0.107074045\n",
      "loss:  0.00846771337\n",
      "loss:  0.0633643568\n",
      "loss:  0.134333044\n",
      "loss:  0.0426145606\n",
      "loss:  0.0337661095\n",
      "loss:  0.0111234309\n",
      "loss:  0.132024705\n",
      "loss:  0.0296499673\n",
      "loss:  0.0434399098\n",
      "loss:  0.143933728\n",
      "loss:  0.00838769\n",
      "loss:  0.0103567401\n",
      "loss:  0.0223950129\n",
      "loss:  0.153663203\n",
      "loss:  0.0984343141\n",
      "loss:  0.136954352\n",
      "loss:  0.148602813\n",
      "loss:  0.00434742076\n",
      "loss:  0.0709654838\n",
      "loss:  0.0208359733\n",
      "loss:  0.0614375323\n",
      "loss:  0.0168283693\n",
      "loss:  0.107325457\n",
      "loss:  0.00663037831\n",
      "loss:  0.10712947\n",
      "loss:  0.0162806902\n",
      "loss:  0.0376823\n",
      "loss:  0.0411870293\n",
      "loss:  0.2271557\n",
      "loss:  0.215192065\n",
      "loss:  0.0494317524\n",
      "loss:  0.0102179768\n",
      "loss:  0.256503284\n",
      "loss:  0.0646737069\n",
      "loss:  0.105488546\n",
      "loss:  0.0160490703\n",
      "loss:  0.0187153369\n",
      "loss:  0.105604298\n",
      "loss:  0.0590891279\n",
      "loss:  0.0417655334\n",
      "loss:  0.0426493846\n",
      "loss:  0.0469285361\n",
      "loss:  0.121941492\n",
      "loss:  0.127645105\n",
      "loss:  0.199119344\n",
      "loss:  0.120136507\n",
      "loss:  0.0249193795\n",
      "loss:  0.0129443966\n",
      "loss:  0.047302261\n",
      "loss:  0.0114959013\n",
      "loss:  0.0953331217\n",
      "loss:  0.0329432636\n",
      "loss:  0.0766361\n",
      "loss:  0.0396307334\n",
      "loss:  0.0353029557\n",
      "loss:  0.0979857817\n",
      "loss:  0.00816719327\n",
      "loss:  0.0617017299\n",
      "loss:  0.0664150491\n",
      "loss:  0.0187147893\n",
      "loss:  0.0143396072\n",
      "loss:  0.0443098508\n",
      "loss:  0.0398195572\n",
      "loss:  0.0688550472\n",
      "loss:  0.0193621106\n",
      "loss:  0.0913229063\n",
      "loss:  0.0124273486\n",
      "loss:  0.0387395732\n",
      "loss:  0.0611855611\n",
      "loss:  0.0367885269\n",
      "loss:  0.0233134609\n",
      "loss:  0.0233695172\n",
      "loss:  0.102459215\n",
      "loss:  0.0959739089\n",
      "loss:  0.00261435239\n",
      "loss:  0.0723621249\n",
      "loss:  0.0114806239\n",
      "loss:  0.315927833\n",
      "loss:  0.134909317\n",
      "loss:  0.0553975\n",
      "loss:  0.0637311041\n",
      "loss:  0.0134339593\n",
      "loss:  0.0434656702\n",
      "loss:  0.0375792235\n",
      "loss:  0.122022331\n",
      "loss:  0.064537771\n",
      "loss:  0.01466903\n",
      "loss:  0.00919609331\n",
      "loss:  0.10718888\n",
      "loss:  0.0983906463\n",
      "loss:  0.036223691\n",
      "loss:  0.149190933\n",
      "loss:  0.0145835225\n",
      "loss:  0.030249605\n",
      "loss:  0.02360126\n",
      "loss:  0.0528003313\n",
      "loss:  0.00994212553\n",
      "loss:  0.0146786273\n",
      "loss:  0.0292404182\n",
      "loss:  0.0408015959\n",
      "loss:  0.0650387183\n",
      "loss:  0.00630706\n",
      "loss:  0.014940829\n",
      "loss:  0.104306422\n",
      "loss:  0.0412268788\n",
      "loss:  0.0320490077\n",
      "loss:  0.0238091797\n",
      "loss:  0.0541750044\n",
      "loss:  0.0125922589\n",
      "loss:  0.0400016606\n",
      "loss:  0.0747335181\n",
      "loss:  0.0348128863\n",
      "loss:  0.131350785\n",
      "loss:  0.0273757093\n",
      "loss:  0.0321327783\n",
      "loss:  0.00707843108\n",
      "loss:  0.083048746\n",
      "loss:  0.148200959\n",
      "loss:  0.00778401596\n",
      "loss:  0.0311588813\n",
      "loss:  0.0838994384\n",
      "loss:  0.107872203\n",
      "loss:  0.00930978451\n",
      "loss:  0.00598950312\n",
      "loss:  0.122267663\n",
      "loss:  0.00801359396\n",
      "loss:  0.0313935131\n",
      "loss:  0.119246587\n",
      "loss:  0.0304899216\n",
      "loss:  0.0312098954\n",
      "loss:  0.0483059734\n",
      "loss:  0.0042327703\n",
      "loss:  0.0109251086\n",
      "loss:  0.0217300132\n",
      "loss:  0.0920806676\n",
      "loss:  0.00386452442\n",
      "loss:  0.0349542424\n",
      "loss:  0.051638972\n",
      "loss:  0.00931231305\n",
      "loss:  0.0204890445\n",
      "loss:  0.126136795\n",
      "loss:  0.126236126\n",
      "loss:  0.0274639539\n",
      "loss:  0.032110177\n",
      "loss:  0.0597494654\n",
      "loss:  0.0306389686\n",
      "loss:  0.0601423942\n",
      "loss:  0.0382786505\n",
      "loss:  0.0138429282\n",
      "loss:  0.0224961974\n",
      "loss:  0.035778977\n",
      "loss:  0.0576511323\n",
      "loss:  0.0192607157\n",
      "loss:  0.007530089\n",
      "loss:  0.059028212\n",
      "loss:  0.021209294\n",
      "loss:  0.0285928305\n",
      "loss:  0.0222638324\n",
      "loss:  0.0101190303\n",
      "loss:  0.0506259315\n",
      "loss:  0.0144479619\n",
      "loss:  0.0105682947\n",
      "loss:  0.0551597401\n",
      "loss:  0.0141155543\n",
      "loss:  0.129218295\n",
      "loss:  0.0376990587\n",
      "loss:  0.0524780378\n",
      "loss:  0.0195876956\n",
      "loss:  0.153413966\n",
      "loss:  0.0295987111\n",
      "loss:  0.0133569948\n",
      "loss:  0.00114147307\n",
      "loss:  0.00945188664\n",
      "loss:  0.0121606663\n",
      "loss:  0.0456754491\n",
      "loss:  0.00670670765\n",
      "loss:  0.0347001813\n",
      "loss:  0.0757813901\n",
      "loss:  0.0267023779\n",
      "loss:  0.0103490027\n",
      "loss:  0.00645198533\n",
      "loss:  0.00646448741\n",
      "loss:  0.0120602846\n",
      "loss:  0.160002708\n",
      "loss:  0.0716595203\n",
      "loss:  0.104357012\n",
      "loss:  0.0149969403\n",
      "loss:  0.0678205267\n",
      "loss:  0.301245868\n",
      "loss:  0.0237028431\n",
      "loss:  0.0458422787\n",
      "loss:  0.0950491205\n",
      "loss:  0.145024389\n",
      "loss:  0.0728270933\n",
      "loss:  0.0253735725\n",
      "loss:  0.0673959\n",
      "loss:  0.0202851295\n",
      "loss:  0.00692174304\n",
      "loss:  0.00808167364\n",
      "loss:  0.0369935259\n",
      "loss:  0.0176877808\n",
      "loss:  0.0166845564\n",
      "loss:  0.0106796455\n",
      "loss:  0.132234633\n",
      "loss:  0.0149392346\n",
      "loss:  0.0555585735\n",
      "loss:  0.0280268472\n",
      "loss:  0.0324059\n",
      "loss:  0.0371437632\n",
      "loss:  0.0139455916\n",
      "loss:  0.0643513\n",
      "loss:  0.013835052\n",
      "loss:  0.0205021854\n",
      "loss:  0.0350988247\n",
      "loss:  0.093718566\n",
      "loss:  0.159351125\n",
      "loss:  0.0629651248\n",
      "loss:  0.0516963676\n",
      "loss:  0.0033984927\n",
      "loss:  0.275563836\n",
      "loss:  0.0171298888\n",
      "loss:  0.0664075315\n",
      "loss:  0.135892764\n",
      "loss:  0.0618951432\n",
      "loss:  0.0110857906\n",
      "loss:  0.00419693906\n",
      "loss:  0.00891245063\n",
      "loss:  0.0136172995\n",
      "loss:  0.219053268\n",
      "loss:  0.0139599573\n",
      "loss:  0.00634320779\n",
      "loss:  0.124132372\n",
      "loss:  0.0355937928\n",
      "loss:  0.0750233829\n",
      "loss:  0.0536457971\n",
      "loss:  0.0229753163\n",
      "loss:  0.0270507988\n",
      "loss:  0.0322468355\n",
      "loss:  0.0510017872\n",
      "loss:  0.0213551465\n",
      "loss:  0.0246407874\n",
      "loss:  0.0056255837\n",
      "loss:  0.0267170649\n",
      "loss:  0.0256348681\n",
      "loss:  0.0547079071\n",
      "loss:  0.0420154482\n",
      "loss:  0.19127582\n",
      "loss:  0.0693115741\n",
      "loss:  0.0107079931\n",
      "loss:  0.0529555976\n",
      "loss:  0.00511391042\n",
      "loss:  0.0240642764\n",
      "loss:  0.01473724\n",
      "loss:  0.0459389053\n",
      "loss:  0.00683094\n",
      "loss:  0.0430488884\n",
      "loss:  0.0067256582\n",
      "loss:  0.273612857\n",
      "loss:  0.0711777434\n",
      "loss:  0.0607456788\n",
      "loss:  0.118473381\n",
      "loss:  0.0271444768\n",
      "loss:  0.0159518272\n",
      "loss:  0.0523438565\n",
      "loss:  0.0367218293\n",
      "loss:  0.107730523\n",
      "loss:  0.00640873238\n",
      "loss:  0.036537651\n",
      "loss:  0.00532815093\n",
      "loss:  0.0181258749\n",
      "loss:  0.00321397372\n",
      "loss:  0.0735828578\n",
      "loss:  0.0221211165\n",
      "loss:  0.0148711\n",
      "loss:  0.0511457175\n",
      "loss:  0.0264642388\n",
      "loss:  0.0872933865\n",
      "loss:  0.0518191494\n",
      "loss:  0.0171729811\n",
      "loss:  0.0897342786\n",
      "loss:  0.0250377115\n",
      "loss:  0.0212318674\n",
      "loss:  0.0471152216\n",
      "loss:  0.00953239296\n",
      "loss:  0.0485694557\n",
      "loss:  0.0525943041\n",
      "loss:  0.152022466\n",
      "loss:  0.00478695892\n",
      "loss:  0.0447285324\n",
      "loss:  0.0448841676\n",
      "loss:  0.0064291032\n",
      "loss:  0.0605303422\n",
      "loss:  0.0379807651\n",
      "loss:  0.0209485777\n",
      "loss:  0.00362152816\n",
      "loss:  0.0936080366\n",
      "loss:  0.00323750521\n",
      "loss:  0.0136371497\n",
      "loss:  0.229549\n",
      "loss:  0.0710290521\n",
      "loss:  0.0100247888\n",
      "loss:  0.0361183062\n",
      "loss:  0.0431375466\n",
      "loss:  0.157874569\n",
      "loss:  0.133462504\n",
      "loss:  0.00419207569\n",
      "loss:  0.00689969119\n",
      "loss:  0.0465572365\n",
      "loss:  0.0768280253\n",
      "loss:  0.00468644826\n",
      "loss:  0.0760098249\n",
      "loss:  0.104448177\n",
      "loss:  0.0554364771\n",
      "loss:  0.00723804254\n",
      "loss:  0.0184212774\n",
      "loss:  0.0676715448\n",
      "loss:  0.00745058246\n",
      "loss:  0.00662799319\n",
      "loss:  0.0579451099\n",
      "loss:  0.0101203742\n",
      "loss:  0.0373012573\n",
      "loss:  0.0118433861\n",
      "loss:  0.111739397\n",
      "loss:  0.0233635716\n",
      "loss:  0.0930233374\n",
      "loss:  0.0147158848\n",
      "loss:  0.0361426584\n",
      "loss:  0.0427666716\n",
      "loss:  0.105191082\n",
      "loss:  0.0342628211\n",
      "loss:  0.0049364944\n",
      "loss:  0.0505503379\n",
      "loss:  0.11039748\n",
      "loss:  0.00239405106\n",
      "loss:  0.249699071\n",
      "loss:  0.0701025501\n",
      "loss:  0.0549939647\n",
      "loss:  0.0264283642\n",
      "loss:  0.0426897705\n",
      "loss:  0.0742913634\n",
      "loss:  0.154209971\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loss:  0.0100687006\n",
      "loss:  0.0291222986\n",
      "loss:  0.0746133626\n",
      "loss:  0.0286083464\n",
      "loss:  0.122451335\n",
      "loss:  0.0526708215\n",
      "loss:  0.115209013\n",
      "loss:  0.0918994248\n",
      "loss:  0.0442525\n",
      "loss:  0.108096033\n",
      "loss:  0.0254177097\n",
      "loss:  0.0141457915\n",
      "loss:  0.0393474549\n",
      "loss:  0.00619741809\n",
      "loss:  0.0717821196\n",
      "loss:  0.0183010045\n",
      "loss:  0.0928830355\n",
      "loss:  0.100961208\n",
      "loss:  0.148157611\n",
      "loss:  0.0459507\n",
      "loss:  0.0329650491\n",
      "loss:  0.0559365116\n",
      "loss:  0.0957076848\n",
      "loss:  0.00941165071\n",
      "loss:  0.00987342373\n",
      "loss:  0.109686993\n",
      "loss:  0.0426589437\n",
      "loss:  0.150195509\n",
      "loss:  0.0358754806\n",
      "loss:  0.0585423522\n",
      "loss:  0.0219188705\n",
      "loss:  0.0996154696\n",
      "loss:  0.045648586\n",
      "loss:  0.0769954696\n",
      "loss:  0.0130854975\n",
      "loss:  0.018405892\n",
      "loss:  0.0186882261\n",
      "loss:  0.0101105096\n",
      "loss:  0.0595490448\n",
      "loss:  0.0631452352\n",
      "loss:  0.0252163224\n",
      "loss:  0.00694755418\n",
      "loss:  0.0256930534\n",
      "loss:  0.0124889351\n",
      "loss:  0.0889802\n",
      "loss:  0.024431251\n",
      "loss:  0.0306418035\n",
      "loss:  0.00617296621\n",
      "loss:  0.17561537\n",
      "loss:  0.108031541\n",
      "loss:  0.0110174669\n",
      "loss:  0.100056313\n",
      "loss:  0.154939726\n",
      "loss:  0.0024433895\n",
      "loss:  0.0483086109\n",
      "loss:  0.00715443259\n",
      "loss:  0.0429882333\n",
      "loss:  0.0636177808\n",
      "loss:  0.127181336\n",
      "loss:  0.225069821\n",
      "loss:  0.0347459838\n",
      "loss:  0.133316502\n",
      "loss:  0.0242443532\n",
      "loss:  0.0104481718\n",
      "loss:  0.0301741306\n",
      "loss:  0.0358594544\n",
      "loss:  0.176915497\n",
      "loss:  0.106310599\n",
      "loss:  0.00578615488\n",
      "loss:  0.0687955618\n",
      "loss:  0.0203172471\n",
      "loss:  0.0655119121\n",
      "loss:  0.0981056094\n",
      "loss:  0.0109238913\n",
      "loss:  0.0548908152\n",
      "loss:  0.00863322336\n",
      "loss:  0.010791353\n",
      "loss:  0.0780915618\n",
      "loss:  0.0577706471\n",
      "loss:  0.0147074545\n",
      "loss:  0.00666900259\n",
      "loss:  0.0113527318\n",
      "loss:  0.0131260147\n",
      "loss:  0.0346401893\n",
      "loss:  0.0230288561\n",
      "loss:  0.00650082808\n",
      "loss:  0.0492787026\n",
      "loss:  0.00484307902\n",
      "loss:  0.019138325\n",
      "loss:  0.011927966\n",
      "loss:  0.0213237908\n",
      "loss:  0.00468141865\n",
      "loss:  0.124334842\n",
      "loss:  0.00358451717\n",
      "loss:  0.0520271212\n",
      "loss:  0.000698292162\n",
      "loss:  0.0488871634\n",
      "loss:  0.0211262\n",
      "loss:  0.0134045063\n",
      "loss:  0.0357248969\n",
      "loss:  0.0183814708\n",
      "loss:  0.00589320669\n",
      "loss:  0.0103069544\n",
      "loss:  0.0175159927\n",
      "loss:  0.0340379551\n",
      "loss:  0.0465665571\n",
      "loss:  0.00641149143\n",
      "loss:  0.0395453386\n",
      "loss:  0.0893143266\n",
      "loss:  0.020423444\n",
      "loss:  0.0257539656\n",
      "loss:  0.0115756188\n",
      "loss:  0.00552798854\n",
      "loss:  0.1178395\n",
      "loss:  0.0893269\n",
      "loss:  0.0867690071\n",
      "loss:  0.00340977102\n",
      "loss:  0.215766728\n",
      "loss:  0.00808485225\n",
      "loss:  0.0671013296\n",
      "loss:  0.00169115851\n",
      "loss:  0.149877742\n",
      "loss:  0.00289530098\n",
      "loss:  0.00118330552\n",
      "loss:  0.199152648\n",
      "loss:  0.0617761575\n",
      "loss:  0.0149526214\n",
      "loss:  0.280632526\n",
      "loss:  0.00549453218\n",
      "loss:  0.0205124076\n",
      "loss:  0.01575936\n",
      "loss:  0.100493982\n",
      "loss:  0.027374411\n",
      "loss:  0.0794098452\n",
      "loss:  0.26189369\n",
      "loss:  0.0104734823\n",
      "loss:  0.032174103\n",
      "loss:  0.0431375206\n",
      "loss:  0.00697972439\n",
      "loss:  0.12395338\n",
      "loss:  0.0992686749\n",
      "loss:  0.0351584256\n",
      "loss:  0.0712285489\n",
      "loss:  0.00498206215\n",
      "loss:  0.0806584284\n",
      "loss:  0.173314184\n",
      "loss:  0.0701860785\n",
      "loss:  0.00895735342\n",
      "loss:  0.09659978\n",
      "loss:  0.0823641941\n",
      "loss:  0.0109729981\n",
      "loss:  0.115346693\n",
      "loss:  0.0317185223\n",
      "loss:  0.0211089421\n",
      "loss:  0.0580731742\n",
      "loss:  0.036389403\n",
      "loss:  0.0934965909\n",
      "loss:  0.0830564052\n",
      "loss:  0.0737114623\n",
      "88.77150177955627\n"
     ]
    }
   ],
   "source": [
    "############## 使用tf.function修饰，训练模型 ###################################\n",
    "@tf.function\n",
    "def train_one_step(X, y):\n",
    "    with tf.GradientTape() as tape:\n",
    "        y_pred = model(X)\n",
    "        loss = tf.keras.losses.sparse_categorical_crossentropy(y_true=y,\n",
    "                                                               y_pred=y_pred)\n",
    "        loss = tf.reduce_mean(loss)\n",
    "        # 使用 TensorFLow 内置的 tf.print 函数，tf.function 不支持 Python 内置 print\n",
    "        tf.print(\"loss: \", loss)\n",
    "    grads = tape.gradient(loss, model.variables)\n",
    "    optimizer.apply_gradients(grads_and_vars=zip(grads, model.variables))\n",
    "\n",
    "\n",
    "start_time = time.time()\n",
    "for batch_idx in range(num_batches):\n",
    "    X, y = data_loader.get_batch(batch_size)\n",
    "    train_one_step(X, y)\n",
    "end_time = time.time()\n",
    "print(end_time - start_time)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "> 上例中，被修饰函数参数 `X,y` 皆为 Numpy 数组；   \n",
    "内部通过 TensorFlow 函数定义了 `loss` 变量；内部引用了外部对象 `model,optimizer`，皆为 TensorFlow 对象；相关操作皆为 TensorFlow 原生操作"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# `tf.function` 内在机制"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "当被 `@tf.function` 修饰的函数第一次被调用的时候:\n",
    "1. `tf.function` 不支持的语句会直接执行，**并不会转换成计算图中的节点**\n",
    "2. 而每个 `tf.` 方法都只是定义了计算节点，而并没有进行任何实质的计算\n",
     "3. `Python` 控制流语句转换成 TensorFlow 计算图中的对应节点（比如说 `while` 和 `for` 语句转换为 `tf.while_loop` ， `if` 语句转换为 `tf.cond` 等等）；\n",
     "4. 基于上两步，建立函数内代码的计算图表示\n",
    "5. 运行一次该计算图\n",
    "6. 将计算图缓存起来  \n",
    "\n",
    "当再次调用函数时，直接运行计算图\n",
    "- **注意：此时原始函数中`tf.function` 不支持的语句，已经不再存在**\n",
    "\n",
    "> **提示**：使用被修饰函数的 `get_concrete_function` 方法，可以直接获得生成的计算图。该方法接受的参数与原始函数相同  \n",
    "`graph=train_one_step.get_concrete_function(X,y)`"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 传入参数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "@tf.function\n",
    "def f(x):\n",
    "    tf.print(x)\n",
    "    print(\"The function is running in Python\")    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The function is running in Python\n",
      "1\r\n"
     ]
    }
   ],
   "source": [
    "#### 第一次调用函数\n",
    "x = tf.constant(1, dtype=tf.int32)\n",
    "f(x)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "> 第一次执行：\n",
    "1. 输入参数 `x` 为 TensorFlow 的 `tf.int32` 张量\n",
    "- 尽管 `tf.print` 函数在前，但不会直接执行，而是定义节点\n",
    "- `print` 函数会直接执行\n",
     "- 图建立完成，然后执行 `tf.print` 函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "10\r\n"
     ]
    }
   ],
   "source": [
    "#### 第二次调用函数\n",
    "x = tf.constant(10, dtype=tf.int32)\n",
    "f(x)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "> 第二次执行：\n",
    "1. 输入参数 `x` 仍然为 TensorFlow 的 `tf.int32` 张量\n",
    "- 直接执行图，即运行`tf.print` 函数，而`print`函数不包含在图中"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2\r\n"
     ]
    }
   ],
   "source": [
    "#### 再次调用\n",
    "b_ = np.array(2, dtype=np.int32) \n",
    "f(b_)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "> 再次执行时：\n",
    "1. 输入参数 `x` 变为 `np.int32` 格式\n",
    "- 仍然直接执行图，即运行`tf.print` 函数，而`print`函数不包含在图中"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The function is running in Python\n",
      "0.1\n",
      "0.2\n"
     ]
    }
   ],
   "source": [
    "#### 改变数据类型，再次调用函数\n",
    "c = tf.constant(0.1, dtype=tf.float32)\n",
    "f(c)\n",
    "d = tf.constant(0.2, dtype=tf.float32)\n",
    "f(d)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "> 输入数据格式改变，再次执行函数时：\n",
    "1. 因为**输入数据格式改变，重新创建计算图**，然后运行\n",
    "- 重复调用，即重复执行图"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The function is running in Python\n",
      "0.2\n",
      "The function is running in Python\n",
      "1\n",
      "The function is running in Python\n",
      "2\n",
      "1\n",
      "The function is running in Python\n",
      "0.1\n",
      "The function is running in Python\n",
      "0.2\n",
      "WARNING:tensorflow:5 out of the last 6 calls to <function f at 0x000001F0BA753558> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings is likely due to passing python objects instead of tensors. Also, tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. Please refer to https://www.tensorflow.org/beta/tutorials/eager/tf_function#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n",
      "0.1\n"
     ]
    }
   ],
   "source": [
    "#### 输入原生的 python 数据\n",
    "f(d)\n",
    "f(1)\n",
    "f(2)\n",
    "f(1)\n",
    "f(0.1)\n",
    "f(0.2)\n",
    "f(0.1)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "> 对于 Python 内置的整数和浮点数类型，只有当值完全一致的时候， `@tf.function` 才会复用之前建立的计算图，而并不会自动将 Python 内置的整数或浮点数等转换成张量"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 调用外部变量"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tf.Tensor(1.0, shape=(), dtype=float32)\n",
      "tf.Tensor(2.0, shape=(), dtype=float32)\n",
      "tf.Tensor(3.0, shape=(), dtype=float32)\n"
     ]
    }
   ],
   "source": [
    "#### 函数内使用了外部变量\n",
    "a = tf.Variable(0.0)\n",
    "\n",
    "@tf.function\n",
    "def g():\n",
    "    a.assign(a + 1.0)\n",
    "    return a\n",
    "\n",
    "print(g())\n",
    "print(g())\n",
    "print(g())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "> 在被 `@tf.function` 修饰的函数里调用 `tf.Variable` 、 `tf.keras.optimizers` 、 `tf.keras.Model` 等包含有变量的数据结构  \n",
    "一旦被调用，这些结构将作为隐含的参数提供给函数。当这些结构内的值在函数内被修改时，在函数外也同样生效。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Python 控制流"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tf.Tensor(4, shape=(), dtype=int32) tf.Tensor(0, shape=(), dtype=int32)\n"
     ]
    }
   ],
   "source": [
    "@tf.function\n",
    "def square_if_positive(x):\n",
    "    if x > 0:\n",
    "        x = x * x\n",
    "    else:\n",
    "        x = 0\n",
    "    return x\n",
    "\n",
    "\n",
    "a = tf.constant(2)\n",
    "b = tf.constant(-2)\n",
    "print(square_if_positive(a), square_if_positive(b))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "def tf__square_if_positive(x):\n",
      "  do_return = False\n",
      "  retval_ = ag__.UndefinedReturnValue()\n",
      "  with ag__.FunctionScope('square_if_positive', 'square_if_positive_scope', ag__.ConversionOptions(recursive=True, user_requested=True, optional_features=(), internal_convert_user_code=True)) as square_if_positive_scope:\n",
      "\n",
      "    def get_state():\n",
      "      return ()\n",
      "\n",
      "    def set_state(_):\n",
      "      pass\n",
      "\n",
      "    def if_true():\n",
      "      x_1, = x,\n",
      "      x_1 = x_1 * x_1\n",
      "      return x_1\n",
      "\n",
      "    def if_false():\n",
      "      x = 0\n",
      "      return x\n",
      "    cond = x > 0\n",
      "    x = ag__.if_stmt(cond, if_true, if_false, get_state, set_state, ('x',), ())\n",
      "    do_return = True\n",
      "    retval_ = square_if_positive_scope.mark_return_value(x)\n",
      "  do_return,\n",
      "  return ag__.retval(retval_)\n",
      "\n"
     ]
    }
   ],
   "source": [
    "print(tf.autograph.to_code(square_if_positive.python_function))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "> - 原函数中的 Python 控制流 `if...else...` 被转换为了 `x = ag__.if_stmt(cond, if_true, if_false, get_state, set_state)` 这种计算图式的写法\n",
    "- 即 `tf.function` 支持 python 控制流，因此可以直接使用 Python 控制流构建计算图，而不是手动使用 TensorFlow 的 API"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## `tf.TensorArray`：TensorFlow 动态数组"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "- 在即时执行模式下，可以直接使用一个 Python 列表（List）存放数组。但若需要使用计算图的特性（例如使用 `@tf.function` 加速模型运行或者使用 `SavedModel` 导出模型），就无法使用这种方式了\n",
    "- TensorFlow 提供了 `tf.TensorArray` ，一种支持计算图特性的 TensorFlow 动态数组。\n",
    "\n",
    "\n",
    "\n",
    "- 其声明的方式为：\n",
     "    - `arr = tf.TensorArray(dtype, size, dynamic_size=False)` ：声明一个大小为 `size` ，类型为 `dtype` 的 `TensorArray` `arr` 。如果将 `dynamic_size` 参数设置为 `True` ，则该数组会自动增长空间。\n",
    "   \n",
    "      \n",
    "- 其读取和写入的方法为：\n",
    "\n",
    "    - `write(index, value)` ：将 `value `写入数组的第 `index` 个位置；\n",
    "\n",
    "    - `read(index)` ：读取数组的第 `index` 个值；\n",
    "    \n",
    "       \n",
    "- 除此以外，`TensorArray` 还包括 `stack()` 、 `unstack()` 等常用操作\n",
    "\n",
    "   \n",
    "- 请注意，由于需要支持计算图， `tf.TensorArray` 的 `write()` 方法是不可以忽略左值的！也就是说，在图执行模式下，必须按照以下的形式写入数组：\n",
    "```\n",
    "arr = arr.write(index, value)\n",
    "```\n",
    "- 这样才可以正常生成一个计算图操作，并将该操作返回给 arr 。而不可以写成：\n",
    "```\n",
    "arr.write(index, value)     # 生成的计算图操作没有左值接收，从而丢失\n",
    "```\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tf.Tensor(<unprintable>, shape=(), dtype=variant)\n",
      "tf.Tensor(0.0, shape=(), dtype=float32) tf.Tensor(1.0, shape=(), dtype=float32) tf.Tensor(2.0, shape=(), dtype=float32)\n"
     ]
    }
   ],
   "source": [
    "@tf.function\n",
    "def array_write_and_read():\n",
    "    arr = tf.TensorArray(dtype=tf.float32, size=3)\n",
    "    arr = arr.write(0, tf.constant(0.0))\n",
    "    arr = arr.write(1, tf.constant(1.0))\n",
    "    arr = arr.write(2, tf.constant(2.0))\n",
    "    arr_0 = arr.read(0)\n",
    "    arr_1 = arr.read(1)\n",
    "    arr_2 = arr.read(2)\n",
    "    return arr, arr_0, arr_1, arr_2\n",
    "\n",
    "\n",
    "arr, a, b, c = array_write_and_read()\n",
    "print(arr)\n",
    "print(a, b, c)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# `tf.function` 性能"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<tf.Tensor: id=3009, shape=(2,), dtype=int32, numpy=array([ 4, 10])>"
      ]
     },
     "execution_count": 34,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#### 被 tf.function 修饰的函数，除了内部机制差别，使用起来和普通函数一样\n",
    "@tf.function\n",
    "def add(a, b):\n",
    "    return a + b\n",
    "\n",
    "# 调用函数\n",
    "add(tf.constant([1, 2]), tf.constant([3, 8]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<tf.Tensor: id=3035, shape=(3, 2), dtype=float32, numpy=\n",
       "array([[3., 3.],\n",
       "       [3., 3.],\n",
       "       [3., 3.]], dtype=float32)>"
      ]
     },
     "execution_count": 35,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 函数内使用函数\n",
    "@tf.function\n",
    "def dense_layer(x, w, b):\n",
    "    return add(tf.matmul(x, w), b)\n",
    "\n",
    "\n",
    "dense_layer(tf.ones([3, 2]), tf.ones([2, 2]), tf.ones([2]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[<tf.Tensor: id=3150, shape=(3, 2), dtype=float32, numpy=\n",
       " array([[1., 1.],\n",
       "        [2., 2.],\n",
       "        [3., 3.]], dtype=float32)>,\n",
       " <tf.Tensor: id=3151, shape=(2,), dtype=float32, numpy=array([1., 1.], dtype=float32)>]"
      ]
     },
     "execution_count": 38,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 计算梯度\n",
    "w = tf.Variable(np.random.randn(3, 2), dtype=tf.float32)\n",
    "b = tf.Variable(np.random.randn(2), dtype=tf.float32)\n",
    "\n",
    "x = tf.constant([[1.0, 2.0, 3.]], dtype=tf.float32)\n",
    "with tf.GradientTape() as tape:\n",
    "    result = dense_layer(x, w, b)\n",
    "    \n",
    "tape.gradient(result, [w, b])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "> 当图中有很多小操作时，图模式相比即时模式快很多；当图中有一些昂贵的操作，如卷积操作时，加速就不明显了"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Eager conv: 0.5390309999993406\n",
      "Function conv: 0.5068611999995483\n"
     ]
    }
   ],
   "source": [
    "import timeit\n",
    "conv_layer = tf.keras.layers.Conv2D(100, 3)\n",
    "\n",
    "\n",
    "@tf.function\n",
    "def conv_fn(image):\n",
    "    return conv_layer(image)\n",
    "\n",
    "\n",
    "image = tf.zeros([1, 200, 200, 100])\n",
    "# warm up\n",
    "conv_layer(image)\n",
    "conv_fn(image)\n",
    "print(\"Eager conv:\", timeit.timeit(lambda: conv_layer(image), number=10))\n",
    "print(\"Function conv:\", timeit.timeit(lambda: conv_fn(image), number=10))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 调试"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "即时执行模式调试代码比在 `tf.function` 内容易得多；因此将函数用 `tf.function` 装饰起来之前，应保证函数无 bug\n",
     "- 调试阶段，可以使用 `tf.config.run_functions_eagerly(True)` 全局禁用计算图；调试完成后再传入 `False` 恢复图执行\n",
     "- `print` 函数只有在计算图创建时调用，可以用来监控函数是否被 `retrace`（重新追踪），即计算图是否因为输入数据类型改变等原因而重建了\n",
    "- `tf.print` 用来监控执行阶段的中间值\n",
    "- `tf.debugging.enable_check_numerics`用来监控是否生成了 `NaNs` 和 `Inf`\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 追踪和多态"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "- Python 支持动态类型，同一函数可以接受不同数据类型的参数；而TensorFlow 计算图需要静态数据类型和数据形状\n",
    "- 而 `tf.function` 在接受同样的类型的数据时，会复用计算图；当输入数据的类型和形状不同时，会重新生成计算图"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Tracing with Tensor(\"a:0\", shape=(), dtype=int32)\n",
      "tf.Tensor(2, shape=(), dtype=int32)\n",
      "\n",
      "tf.Tensor(4, shape=(), dtype=int32)\n",
      "\n",
      "Tracing with Tensor(\"a:0\", shape=(), dtype=float32)\n",
      "tf.Tensor(2.2, shape=(), dtype=float32)\n",
      "\n",
      "Tracing with Tensor(\"a:0\", shape=(), dtype=string)\n",
      "tf.Tensor(b'aa', shape=(), dtype=string)\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# 多态\n",
    "\n",
    "@tf.function\n",
    "def double(a):\n",
    "    print(\"Tracing with\", a)\n",
    "    return a + a\n",
    "\n",
    "\n",
    "print(double(tf.constant(1)))\n",
    "print()\n",
    "print(double(tf.constant(2))) # 相同类型数据，复用前一步计算图\n",
    "print()\n",
    "print(double(tf.constant(1.1))) # 不同类型数据，重建计算图\n",
    "print()\n",
    "print(double(tf.constant(\"a\")))\n",
    "print()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Tracing!\n",
      "Executing\n",
      "Tracing!\n",
      "Executing\n"
     ]
    }
   ],
   "source": [
    "def f():\n",
    "    print('Tracing!')\n",
    "    tf.print('Executing')\n",
    "\n",
    "# 使用函数而不是装饰器，不管输入数据，每次生成新计算图\n",
    "tf.function(f)()\n",
    "tf.function(f)()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Obtaining concrete trace：\n",
      "Executing traced function:\n",
      "tf.Tensor(b'aa', shape=(), dtype=string)\n",
      "\n",
       "Executing traced function again:\n",
      "tf.Tensor(b'bb', shape=(), dtype=string)\n",
      "\n",
      "Using a concrete trace with incompatible types will throw an error\n",
      "Caught expected exception \n",
      "  <class 'tensorflow.python.framework.errors_impl.InvalidArgumentError'>:\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Traceback (most recent call last):\n",
      "  File \"<ipython-input-47-1aec9a4715a6>\", line 9, in assert_raises\n",
      "    yield\n",
      "  File \"<ipython-input-50-9503431df288>\", line 19, in <module>\n",
      "    double_strings(tf.constant(1))\n",
      "tensorflow.python.framework.errors_impl.InvalidArgumentError: cannot compute __inference_double_3350 as input #0(zero-based) was expected to be a string tensor but is a int32 tensor [Op:__inference_double_3350]\n"
     ]
    }
   ],
   "source": [
    "############### .get_concrete_function 方法获取计算图\n",
    "\n",
    "print(\"Obtaining concrete trace：\")\n",
    "\n",
     "# 获取特定计算图，指定数据类型\n",
    "double_strings = double.get_concrete_function(\n",
    "    tf.TensorSpec(shape=None, dtype=tf.string))\n",
    "\n",
    "print(\"Executing traced function:\")\n",
    "print(double_strings(tf.constant(\"a\")))\n",
    "print()\n",
    "\n",
    "# 与上一步相同的数据类型\n",
     "print(\"Executing traced function again:\")\n",
    "print(double_strings(a=tf.constant(\"b\")))\n",
    "print()\n",
    "\n",
    "# 改变数据类型，报错\n",
    "print(\"Using a concrete trace with incompatible types will throw an error\")\n",
    "with assert_raises(tf.errors.InvalidArgumentError):\n",
    "    double_strings(tf.constant(1))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Tracing with Tensor(\"x:0\", shape=(None,), dtype=int32)\n",
      "tf.Tensor([4 1], shape=(2,), dtype=int32)\n",
      "Caught expected exception \n",
      "  <class 'ValueError'>:\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Traceback (most recent call last):\n",
      "  File \"<ipython-input-47-1aec9a4715a6>\", line 9, in assert_raises\n",
      "    yield\n",
      "  File \"<ipython-input-51-57d6c14feb7f>\", line 14, in <module>\n",
      "    next_collatz(tf.constant([[1, 2], [3, 4]]))\n",
      "ValueError: Python inputs incompatible with input_signature:\n",
      "  inputs: (\n",
      "    tf.Tensor(\n",
      "[[1 2]\n",
      " [3 4]], shape=(2, 2), dtype=int32))\n",
      "  input_signature: (\n",
      "    TensorSpec(shape=(None,), dtype=tf.int32, name=None))\n"
     ]
    }
   ],
   "source": [
    "############### input_signature 参数限制输入格式\n",
    "\n",
    "\n",
    "@tf.function(input_signature=(tf.TensorSpec(shape=[None], dtype=tf.int32), ))\n",
    "def next_collatz(x):\n",
    "    print(\"Tracing with\", x)\n",
    "    return tf.where(x % 2 == 0, x // 2, 3 * x + 1)\n",
    "\n",
    "\n",
    "print(next_collatz(tf.constant([1, 2])))\n",
    "\n",
    "# 限定了一维数据，输入二维数据会报错\n",
    "with assert_raises(ValueError):\n",
    "    next_collatz(tf.constant([[1, 2], [3, 4]]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Python 类型参数或 Tensor 参数"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "- 用于控制超参和图构建的一些参数通常为 Python 类型，如 `num_layers=10,training=True,nonlinearity='relu'`；当这些参数改变时，重建图是合理的\n",
    "- 不用于控制图构建的参数发生改变，重建图则非常低效了"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Tracing with num_steps = 10\n",
      "Tracing with num_steps = 20\n"
     ]
    }
   ],
   "source": [
    "def train_one_step():\n",
    "    pass\n",
    "\n",
    "\n",
    "@tf.function\n",
    "def train(num_steps):   \n",
    "    print(\"Tracing with num_steps = {}\".format(num_steps))\n",
    "    for _ in tf.range(num_steps):\n",
    "        train_one_step()\n",
    "\n",
    "\n",
    "train(num_steps=10) # num_steps 为 Python 类型数据\n",
    "train(num_steps=20) # 每次都会重建图，print 函数都会执行"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Tracing with num_steps = Tensor(\"num_steps:0\", shape=(), dtype=int32)\n"
     ]
    }
   ],
   "source": [
    "train(num_steps=tf.constant(10))  # num_steps 为 Tensor 类型\n",
    "train(num_steps=tf.constant(20))  # 再次调用会复用上一步创建的图"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## `tf.function` 副作用"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "- 综上 `tf.function` 内部 Python 的函数副作用，如 print，对象改变 等只在图创建时起作用；因此只用来 调试 图的创建过程\n",
    "- 此外 `tf.Variable.assign,tf.print,tf.summary` 保证每次调用时，不论是创建图还是执行图，都会正常执行  \n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Traced with 1\n",
      "Executed with 1\n",
      "Executed with 1\n",
      "Traced with 2\n",
      "Executed with 2\n"
     ]
    }
   ],
   "source": [
    "@tf.function\n",
    "def f(x):\n",
    "    print(\"Traced with\", x)\n",
    "    tf.print(\"Executed with\", x)\n",
    "\n",
    "\n",
    "f(1)\n",
    "f(1) # 调用上一步相同的图，Python 函数副作用不再起作用\n",
    "f(2)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "- `tf.py_function` 可以确保 Python 函数副作用每次都起作用 \n",
    "    - `tf.py_function` 会将输入输出都转换成 Tensor   "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Python side effect\n",
      "Python side effect\n",
      "Python side effect\n"
     ]
    }
   ],
   "source": [
    "external_list = []\n",
    "\n",
    "\n",
    "def side_effect(x):\n",
    "    print('Python side effect')\n",
    "    external_list.append(x)\n",
    "\n",
    "\n",
    "@tf.function\n",
    "def f(x):\n",
    "    tf.py_function(side_effect, inp=[x], Tout=[]) # Python 函数每次调用都会执行\n",
    "\n",
    "\n",
    "f(1)\n",
    "f(1) # 每次输入相同的数据，通常函数副作用会失效\n",
    "f(1)\n",
    "assert len(external_list) == 3\n",
    "# .numpy() call required because py_function casts 1 to tf.constant(1)\n",
    "assert external_list[0].numpy() == 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 63,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[<tf.Tensor: id=3646, shape=(), dtype=int32, numpy=1>,\n",
       " <tf.Tensor: id=3647, shape=(), dtype=int32, numpy=1>,\n",
       " <tf.Tensor: id=3648, shape=(), dtype=int32, numpy=1>]"
      ]
     },
     "execution_count": 63,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "external_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Python 状态\n",
    "- Python 许多特征，如生成器和迭代器，依靠Python内核追踪状态，在即时执行模式会正常运行\n",
    "- 而在 `tf.function` 内部则出现问题"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 65,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Value of external_var: 0\n",
      "Value of external_var: 0\n",
      "Value of external_var: 0\n"
     ]
    }
   ],
   "source": [
    "external_var = tf.Variable(0)\n",
    "\n",
    "\n",
    "@tf.function\n",
    "def buggy_consume_next(iterator):\n",
    "    external_var.assign_add(next(iterator))  # Python 的迭代器\n",
    "    tf.print(\"Value of external_var:\", external_var)\n",
    "\n",
    "\n",
    "iterator = iter([0, 1, 2, 3])\n",
    "buggy_consume_next(iterator)\n",
    "\n",
    "# 不再消耗迭代器的下一个值，而是重复第一个值，图内部 next(iterator) 失效\n",
    "buggy_consume_next(iterator)\n",
    "buggy_consume_next(iterator)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 变量"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "`tf.function` 会复用图，如果在其内部创建变量，复用图时，变量也会复用；因此**通常**其内部禁止创建变量    \n",
    " 而即时模式每次调用则会重新创建变量   "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 66,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From D:\\Program\\Anaconda3\\envs\\tf2\\lib\\site-packages\\tensorflow_core\\python\\ops\\resource_variable_ops.py:1781: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "If using Keras pass *_constraint arguments to layers.\n",
      "Caught expected exception \n",
      "  <class 'ValueError'>:\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Traceback (most recent call last):\n",
      "  File \"<ipython-input-47-1aec9a4715a6>\", line 9, in assert_raises\n",
      "    yield\n",
      "  File \"<ipython-input-66-5990b9e714fd>\", line 9, in <module>\n",
      "    f(1.0)\n",
      "ValueError: in converted code:\n",
      "\n",
      "    <ipython-input-66-5990b9e714fd>:3 f  *\n",
      "        v = tf.Variable(1.0)\n",
      "    D:\\Program\\Anaconda3\\envs\\tf2\\lib\\site-packages\\tensorflow_core\\python\\ops\\variables.py:260 __call__\n",
      "        return cls._variable_v2_call(*args, **kwargs)\n",
      "    D:\\Program\\Anaconda3\\envs\\tf2\\lib\\site-packages\\tensorflow_core\\python\\ops\\variables.py:254 _variable_v2_call\n",
      "        shape=shape)\n",
      "    D:\\Program\\Anaconda3\\envs\\tf2\\lib\\site-packages\\tensorflow_core\\python\\ops\\variables.py:65 getter\n",
      "        return captured_getter(captured_previous, **kwargs)\n",
      "    D:\\Program\\Anaconda3\\envs\\tf2\\lib\\site-packages\\tensorflow_core\\python\\eager\\def_function.py:413 invalid_creator_scope\n",
      "        \"tf.function-decorated function tried to create \"\n",
      "\n",
      "    ValueError: tf.function-decorated function tried to create variables on non-first call.\n",
      "\n"
     ]
    }
   ],
   "source": [
    "@tf.function\n",
    "def f(x):\n",
    "    v = tf.Variable(1.0)  # 内部创建新变量，则会出错\n",
    "    v.assign_add(x)\n",
    "    return v\n",
    "\n",
    "\n",
    "with assert_raises(ValueError):\n",
    "    f(1.0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 67,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tf.Tensor(2.0, shape=(), dtype=float32)\n",
      "tf.Tensor(4.0, shape=(), dtype=float32)\n"
     ]
    }
   ],
   "source": [
    "# Using a variable created outside the function works fine\n",
    "v = tf.Variable(1.0)\n",
    "\n",
    "\n",
    "@tf.function\n",
    "def f(x):\n",
    "    return v.assign_add(x)\n",
    "\n",
    "\n",
    "print(f(1.0))  # 2.0\n",
    "print(f(2.0))  # 4.0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 68,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tf.Tensor(2.0, shape=(), dtype=float32)\n",
      "tf.Tensor(4.0, shape=(), dtype=float32)\n"
     ]
    }
   ],
   "source": [
    "class C:\n",
    "    pass\n",
    "\n",
    "\n",
    "obj = C()\n",
    "obj.v = None\n",
    "\n",
    "\n",
    "@tf.function\n",
    "def g(x):\n",
    "    if obj.v is None:            # only runs while tracing the first call\n",
    "        obj.v = tf.Variable(1.0) # so the variable is created exactly once -- no error\n",
    "    return obj.v.assign_add(x)\n",
    "\n",
    "\n",
    "print(g(1.0))  # 2.0\n",
    "print(g(2.0))  # 4.0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 69,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tf.Tensor(12.0, shape=(), dtype=float32)\n",
      "tf.Tensor(36.0, shape=(), dtype=float32)\n"
     ]
    }
   ],
   "source": [
    "state = []\n",
    "\n",
    "\n",
    "@tf.function\n",
    "def fn(x):\n",
    "    if not state:   \n",
    "        state.append(tf.Variable(2.0 * x))        # the created variable depends on the argument\n",
    "        state.append(tf.Variable(state[0] * 3.0)) # likewise, created only on the first call -- no error\n",
    "    return state[0] * x * state[1]\n",
    "\n",
    "\n",
    "print(fn(tf.constant(1.0)))\n",
    "print(fn(tf.constant(3.0)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 70,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[<tf.Variable 'Variable:0' shape=() dtype=float32, numpy=2.0>,\n",
       " <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=6.0>]"
      ]
     },
     "execution_count": 70,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "state"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## AutoGraph "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "AutoGraph 将 Python 即时执行代码转换成计算图，包含控制流程 `if, for, while`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 71,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0.207458138 0.250728369 0.0565127134 0.900473475 0.92794323]\n",
      "[0.204532236 0.245603204 0.0564526282 0.716528356 0.72963351]\n",
      "[0.201727062 0.240781173 0.0563927293 0.61475426 0.62284112]\n",
      "[0.199034527 0.236233428 0.0563330203 0.547465086 0.553103089]\n",
      "[0.196447253 0.231934905 0.0562734976 0.498617917 0.502842307]\n",
      "[0.193958566 0.227863565 0.0562141687 0.4610295 0.464349538]\n",
      "[0.191562369 0.224000067 0.0561550297 0.430922925 0.433622539]\n",
      "[0.189253047 0.220327288 0.0560960732 0.406092346 0.408344269]\n",
      "[0.187025458 0.21682997 0.0560373031 0.385149688 0.387065887]\n",
      "[0.184874892 0.213494569 0.0559787154 0.367171288 0.368828028]\n",
      "[0.182797 0.210308939 0.0559203103 0.351514965 0.352966189]\n",
      "[0.180787787 0.207262143 0.0558620915 0.337718397 0.339003474]\n",
      "[0.178843543 0.204344422 0.0558040515 0.325438946 0.326587409]\n",
      "[0.176960841 0.201546878 0.0557461902 0.314416528 0.315451056]\n",
      "[0.175136492 0.198861465 0.0556885079 0.304449648 0.305387944]\n",
      "[0.17336753 0.196280882 0.0556310043 0.295379341 0.296235502]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<tf.Tensor: id=4022, shape=(5,), dtype=float32, numpy=\n",
       "array([0.17165123, 0.19379847, 0.05557368, 0.2870784 , 0.28786382],\n",
       "      dtype=float32)>"
      ]
     },
     "execution_count": 71,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# AutoGraph converts this tensor-dependent while loop into a graph-level tf.while_loop\n",
    "@tf.function\n",
    "def f(x):\n",
    "    while tf.reduce_sum(x) > 1:\n",
    "        tf.print(x)\n",
    "        x = tf.tanh(x)\n",
    "    return x\n",
    "\n",
    "\n",
    "f(tf.random.uniform([5]))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "查看 autograph 创建的代码"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 72,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "def tf__f(x):\n",
      "  do_return = False\n",
      "  retval_ = ag__.UndefinedReturnValue()\n",
      "  with ag__.FunctionScope('f', 'f_scope', ag__.ConversionOptions(recursive=True, user_requested=True, optional_features=(), internal_convert_user_code=True)) as f_scope:\n",
      "\n",
      "    def get_state():\n",
      "      return ()\n",
      "\n",
      "    def set_state(_):\n",
      "      pass\n",
      "\n",
      "    def loop_body(x):\n",
      "      ag__.converted_call(tf.print, f_scope.callopts, (x,), None, f_scope)\n",
      "      x = ag__.converted_call(tf.tanh, f_scope.callopts, (x,), None, f_scope)\n",
      "      return x,\n",
      "\n",
      "    def loop_test(x):\n",
      "      return ag__.converted_call(tf.reduce_sum, f_scope.callopts, (x,), None, f_scope) > 1\n",
      "    x, = ag__.while_stmt(loop_test, loop_body, get_state, set_state, (x,), ('x',), ())\n",
      "    do_return = True\n",
      "    retval_ = f_scope.mark_return_value(x)\n",
      "  do_return,\n",
      "  return ag__.retval(retval_)\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Show the Python code AutoGraph generated for f\n",
    "print(tf.autograph.to_code(f.python_function))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 条件语句"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 74,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第一次调用：\n",
      "Tracing for loop\n",
      "Tracing fizzbuzz branch\n",
      "Tracing fizz branch\n",
      "Tracing buzz branch\n",
      "Tracing default branch\n",
      "1\n",
      "2\n",
      "fizz\n",
      "4\n",
      "buzz\n",
      "第二次调用：\n",
      "1\n",
      "2\n",
      "fizz\n",
      "4\n",
      "buzz\n",
      "fizz\n",
      "7\n",
      "8\n",
      "fizz\n",
      "buzz\n",
      "11\n",
      "fizz\n",
      "13\n",
      "14\n",
      "fizzbuzz\n",
      "16\n",
      "17\n",
      "fizz\n",
      "19\n",
      "buzz\n"
     ]
    }
   ],
   "source": [
    "@tf.function\n",
    "def fizzbuzz(n):\n",
    "    for i in tf.range(1, n + 1):\n",
    "        print('Tracing for loop')\n",
    "        if i % 15 == 0:\n",
    "            print('Tracing fizzbuzz branch') # Python print only runs while tracing; skipped on later calls\n",
    "            tf.print('fizzbuzz')\n",
    "        elif i % 3 == 0:\n",
    "            print('Tracing fizz branch')\n",
    "            tf.print('fizz')\n",
    "        elif i % 5 == 0:\n",
    "            print('Tracing buzz branch')\n",
    "            tf.print('buzz')\n",
    "        else:\n",
    "            print('Tracing default branch')\n",
    "            tf.print(i)\n",
    "\n",
    "print(\"第一次调用：\")\n",
    "fizzbuzz(tf.constant(5))\n",
    "print(\"第二次调用：\")\n",
    "fizzbuzz(tf.constant(20))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 循环语句"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 75,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "train([(1, 1), (1, 1), (1, 1)]) contains 11 nodes in its graph\n",
      "train([(1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1)]) contains 32 nodes in its graph\n"
     ]
    }
   ],
   "source": [
    "# Count the graph nodes traced for a given input: Python loops over Python data\n",
    "# are unrolled at trace time, so larger inputs produce larger graphs.\n",
    "def measure_graph_size(f, *args):\n",
    "    g = f.get_concrete_function(*args).graph\n",
    "    print(\"{}({}) contains {} nodes in its graph\".format(\n",
    "        f.__name__, ', '.join(map(str, args)), len(g.as_graph_def().node)))\n",
    "\n",
    "\n",
    "@tf.function\n",
    "def train(dataset):\n",
    "    loss = tf.constant(0)\n",
    "    for x, y in dataset:\n",
    "        loss += tf.abs(y - x)  # Some dummy computation.\n",
    "    return loss\n",
    "\n",
    "\n",
    "small_data = [(1, 1)] * 3\n",
    "big_data = [(1, 1)] * 10\n",
    "\n",
    "measure_graph_size(train, small_data)\n",
    "measure_graph_size(train, big_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 76,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "train(<DatasetV1Adapter shapes: (<unknown>, <unknown>), types: (tf.int32, tf.int32)>) contains 5 nodes in its graph\n",
      "train(<DatasetV1Adapter shapes: (<unknown>, <unknown>), types: (tf.int32, tf.int32)>) contains 5 nodes in its graph\n"
     ]
    }
   ],
   "source": [
    "# With a tf.data.Dataset the loop stays in-graph, so graph size no longer grows with data length\n",
    "measure_graph_size(\n",
    "    train,\n",
    "    tf.data.Dataset.from_generator(lambda: small_data, (tf.int32, tf.int32)))\n",
    "measure_graph_size(\n",
    "    train,\n",
    "    tf.data.Dataset.from_generator(lambda: big_data, (tf.int32, tf.int32)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 77,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<tf.Tensor: id=4517, shape=(2, 3, 4), dtype=float32, numpy=\n",
       "array([[[0.23907411, 0.69274783, 0.8255179 , 0.79221475],\n",
       "        [0.9259411 , 0.6930404 , 1.5047482 , 0.906649  ],\n",
       "        [1.7105237 , 1.6460785 , 2.1498063 , 0.930629  ]],\n",
       "\n",
       "       [[0.11684346, 0.71039987, 0.87097895, 0.04925954],\n",
       "        [0.27159166, 1.0768443 , 1.7212101 , 0.92715   ],\n",
       "        [0.8292786 , 1.919958  , 2.0388227 , 1.2462468 ]]], dtype=float32)>"
      ]
     },
     "execution_count": 77,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "batch_size = 2\n",
    "seq_len = 3\n",
    "feature_size = 4\n",
    "\n",
    "\n",
    "# Dummy RNN cell: accumulate the input into the state\n",
    "def rnn_step(inp, state):\n",
    "    return inp + state\n",
    "\n",
    "\n",
    "@tf.function\n",
    "def dynamic_rnn(rnn_step, input_data, initial_state):\n",
    "    # [batch, time, features] -> [time, batch, features]\n",
    "    input_data = tf.transpose(input_data, [1, 0, 2])\n",
    "    max_seq_len = input_data.shape[0]\n",
    "\n",
    "    states = tf.TensorArray(tf.float32, size=max_seq_len)  # accumulates per-step results inside the graph\n",
    "    state = initial_state\n",
    "    for i in tf.range(max_seq_len):\n",
    "        state = rnn_step(input_data[i], state)\n",
    "        states = states.write(i, state)\n",
    "    return tf.transpose(states.stack(), [1, 0, 2])\n",
    "\n",
    "\n",
    "dynamic_rnn(rnn_step, tf.random.uniform([batch_size, seq_len, feature_size]),\n",
    "            tf.zeros([batch_size, feature_size]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 总结：AutoGraph使用规范"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "\n",
    "\n",
    "1. 被`@tf.function`修饰的函数应尽可能使用TensorFlow中的函数而不是Python中的其他函数。\n",
    "    - 例如使用`tf.print`而不是`print`，使用`tf.range`而不是`range`，使用`tf.constant(True)`而不是`True`.  \n",
    "            \n",
    "    - Python中的函数仅仅会在跟踪执行函数以创建静态图的阶段使用，普通Python函数是无法嵌入到静态计算图中的，所以在计算图构建好之后再次调用的时候，这些Python函数并没有被计算，而TensorFlow中的函数则可以嵌入到计算图中。使用普通的Python函数会导致被`@tf.function`修饰前【eager执行】和被`@tf.function`修饰后【静态图执行】的输出不一致。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "\n",
    "2. 避免在`@tf.function`修饰的函数内部定义`tf.Variable`.\n",
    "    - 如果函数内部定义了`tf.Variable`,那么在【eager执行】时，这种创建`tf.Variable`的行为在每次函数调用时候都会发生。但是在【静态图执行】时，这种创建`tf.Variable`的行为只会发生在第一步跟踪Python代码逻辑创建计算图时，这会导致被`@tf.function`修饰前【eager执行】和被`@tf.function`修饰后【静态图执行】的输出不一致。实际上，TensorFlow在这种情况下一般会报错。 \n",
    "    \n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "3. 被`@tf.function`修饰的函数不可修改该函数外部的Python列表或字典等数据结构变量。\n",
    "    - 静态计算图是被编译成C++代码在TensorFlow内核中执行的。Python中的列表和字典等数据结构变量是无法嵌入到计算图中，它们仅仅能够在创建计算图时被读取，在执行计算图时是无法修改Python中的列表或字典这样的数据结构变量的。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# AutoGraph和tf.Module"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "- 避免在`@tf.function`修饰的函数内部定义`tf.Variable`，但是如果在函数外部定义`tf.Variable`的话，又会显得这个函数有外部变量依赖，封装不够完美。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "- 基类`tf.Module`，通过继承它构建子类，可以非常方便地管理变量，还可以非常方便地管理它引用的其它Module，最重要的是，能够利用`tf.saved_model`保存模型并实现跨平台部署使用。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "- `tf.keras.models.Model,tf.keras.layers.Layer` 都是继承自`tf.Module`的，提供了方便的变量管理和所引用的子模块管理的功能。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 105,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "4\r\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<tf.Tensor: id=5614, shape=(), dtype=float32, numpy=4.0>"
      ]
     },
     "execution_count": 105,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#### Function that references an external variable\n",
    "x = tf.Variable(1.0, dtype=tf.float32)\n",
    "\n",
    "\n",
    "# input_signature restricts the accepted tensor signature (shape and dtype)\n",
    "@tf.function(input_signature=[tf.TensorSpec(shape=[], dtype=tf.float32)])\n",
    "def add_print(a):\n",
    "    x.assign_add(a)\n",
    "    tf.print(x)\n",
    "    return (x)\n",
    "\n",
    "\n",
    "add_print(tf.constant(3.0))\n",
    "# add_print(tf.constant(3))  # an argument that violates the signature raises an error"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 106,
   "metadata": {},
   "outputs": [],
   "source": [
    "#### Subclass tf.Module to encapsulate variables\n",
    "class DemoModule(tf.Module):\n",
    "    def __init__(self, init_value=tf.constant(0.0), name=None):\n",
    "        super(DemoModule, self).__init__(name=name)\n",
    "        with self.name_scope:  # equivalent to: with tf.name_scope(\"demo_module\")\n",
    "            self.x = tf.Variable(init_value, dtype=tf.float32, trainable=True)\n",
    "\n",
    "    @tf.function(input_signature=[tf.TensorSpec(shape=[], dtype=tf.float32)])\n",
    "    def addprint(self, a):\n",
    "        with self.name_scope:\n",
    "            self.x.assign_add(a)\n",
    "            tf.print(self.x)\n",
    "            return (self.x)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 107,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "6\r\n"
     ]
    }
   ],
   "source": [
    "demo = DemoModule(init_value=tf.constant(1.0))\n",
    "result = demo.addprint(tf.constant(5.0))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 108,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "56\r\n"
     ]
    }
   ],
   "source": [
    "result = demo.addprint(tf.constant(50.0))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 109,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "()"
      ]
     },
     "execution_count": 109,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# List all submodules of this module\n",
    "demo.submodules"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 111,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: demo_model\\assets\n"
     ]
    }
   ],
   "source": [
    "# Save with tf.saved_model and choose which method(s) to export for cross-platform serving\n",
    "tf.saved_model.save(\n",
    "    demo,\n",
    "    \"demo_model\",\n",
    "    signatures={\"serving_default\": demo.addprint},  # exported serving signature\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 112,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "61\r\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<tf.Tensor: id=5942, shape=(), dtype=float32, numpy=61.0>"
      ]
     },
     "execution_count": 112,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Reload the SavedModel and call the exported method\n",
    "demo2 = tf.saved_model.load(\"demo_model\")\n",
    "demo2.addprint(tf.constant(5.0))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 114,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "MetaGraphDef with tag-set: 'serve' contains the following SignatureDefs:\n",
      "\n",
      "signature_def['__saved_model_init_op']:\n",
      "  The given SavedModel SignatureDef contains the following input(s):\n",
      "  The given SavedModel SignatureDef contains the following output(s):\n",
      "    outputs['__saved_model_init_op'] tensor_info:\n",
      "        dtype: DT_INVALID\n",
      "        shape: unknown_rank\n",
      "        name: NoOp\n",
      "  Method name is: \n",
      "\n",
      "signature_def['serving_default']:\n",
      "  The given SavedModel SignatureDef contains the following input(s):\n",
      "    inputs['a'] tensor_info:\n",
      "        dtype: DT_FLOAT\n",
      "        shape: ()\n",
      "        name: serving_default_a:0\n",
      "  The given SavedModel SignatureDef contains the following output(s):\n",
      "    outputs['output_0'] tensor_info:\n",
      "        dtype: DT_FLOAT\n",
      "        shape: ()\n",
      "        name: StatefulPartitionedCall:0\n",
      "  Method name is: tensorflow/serving/predict\n"
     ]
    }
   ],
   "source": [
    "# Inspect the SavedModel's signatures with the CLI tool\n",
    "!saved_model_cli show --dir demo_model --all"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 115,
   "metadata": {},
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "from tensorflow.keras import models, layers, losses, metrics"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 116,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "True\n",
      "True\n",
      "True\n"
     ]
    }
   ],
   "source": [
    "print(issubclass(tf.keras.Model, tf.Module))\n",
    "print(issubclass(tf.keras.layers.Layer, tf.Module))\n",
    "print(issubclass(tf.keras.Model, tf.keras.layers.Layer))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 117,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model: \"sequential\"\n",
      "_________________________________________________________________\n",
      "Layer (type)                 Output Shape              Param #   \n",
      "=================================================================\n",
      "dense (Dense)                (None, 4)                 44        \n",
      "_________________________________________________________________\n",
      "dense_1 (Dense)              (None, 2)                 10        \n",
      "_________________________________________________________________\n",
      "dense_2 (Dense)              (None, 1)                 3         \n",
      "=================================================================\n",
      "Total params: 57\n",
      "Trainable params: 57\n",
      "Non-trainable params: 0\n",
      "_________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "tf.keras.backend.clear_session() \n",
    "\n",
    "# Build a small Sequential model to inspect tf.Module's variable/submodule management\n",
    "model = models.Sequential()\n",
    "\n",
    "model.add(layers.Dense(4,input_shape = (10,)))\n",
    "model.add(layers.Dense(2))\n",
    "model.add(layers.Dense(1))\n",
    "model.summary()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 118,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[<tf.Variable 'dense/kernel:0' shape=(10, 4) dtype=float32, numpy=\n",
       " array([[ 0.49823368,  0.38705254,  0.49931097, -0.47241575],\n",
       "        [-0.08276552, -0.3621772 ,  0.36847925, -0.10161632],\n",
       "        [-0.49198854,  0.58784914, -0.5340485 , -0.4401762 ],\n",
       "        [ 0.24897873, -0.00801528, -0.08869678,  0.6431689 ],\n",
       "        [-0.21808079, -0.41452947, -0.05316031,  0.33350515],\n",
       "        [-0.14954305,  0.35132825, -0.04744196, -0.2250906 ],\n",
       "        [ 0.4554441 , -0.18187407, -0.46764714,  0.1906746 ],\n",
       "        [-0.41395682,  0.20659739,  0.41261542,  0.5152123 ],\n",
       "        [-0.62937295, -0.29700363,  0.10905778,  0.10109794],\n",
       "        [-0.34243053, -0.20391187,  0.4967723 , -0.31809756]],\n",
       "       dtype=float32)>,\n",
       " <tf.Variable 'dense/bias:0' shape=(4,) dtype=float32, numpy=array([0., 0., 0., 0.], dtype=float32)>,\n",
       " <tf.Variable 'dense_1/kernel:0' shape=(4, 2) dtype=float32, numpy=\n",
       " array([[-0.4328966 , -0.7175617 ],\n",
       "        [ 0.18615818, -0.11278558],\n",
       "        [-0.14009857,  0.9860091 ],\n",
       "        [ 0.09973741, -0.91382265]], dtype=float32)>,\n",
       " <tf.Variable 'dense_1/bias:0' shape=(2,) dtype=float32, numpy=array([0., 0.], dtype=float32)>,\n",
       " <tf.Variable 'dense_2/kernel:0' shape=(2, 1) dtype=float32, numpy=\n",
       " array([[1.0078322],\n",
       "        [1.2090417]], dtype=float32)>,\n",
       " <tf.Variable 'dense_2/bias:0' shape=(1,) dtype=float32, numpy=array([0.], dtype=float32)>]"
      ]
     },
     "execution_count": 118,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model.variables"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 119,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[<tf.Variable 'dense_1/kernel:0' shape=(4, 2) dtype=float32, numpy=\n",
       " array([[-0.4328966 , -0.7175617 ],\n",
       "        [ 0.18615818, -0.11278558],\n",
       "        [-0.14009857,  0.9860091 ],\n",
       "        [ 0.09973741, -0.91382265]], dtype=float32)>,\n",
       " <tf.Variable 'dense_1/bias:0' shape=(2,) dtype=float32, numpy=array([0., 0.], dtype=float32)>,\n",
       " <tf.Variable 'dense_2/kernel:0' shape=(2, 1) dtype=float32, numpy=\n",
       " array([[1.0078322],\n",
       "        [1.2090417]], dtype=float32)>,\n",
       " <tf.Variable 'dense_2/bias:0' shape=(1,) dtype=float32, numpy=array([0.], dtype=float32)>]"
      ]
     },
     "execution_count": 119,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model.layers[0].trainable = False  # freeze layer 0 so its variables drop out of trainable_variables\n",
    "model.trainable_variables"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 120,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(<tensorflow.python.keras.engine.input_layer.InputLayer at 0x1f0c7b3ce48>,\n",
       " <tensorflow.python.keras.layers.core.Dense at 0x1f0c7b5d208>,\n",
       " <tensorflow.python.keras.layers.core.Dense at 0x1f0c778d508>,\n",
       " <tensorflow.python.keras.layers.core.Dense at 0x1f0c79bd408>)"
      ]
     },
     "execution_count": 120,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model.submodules"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 121,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[<tensorflow.python.keras.layers.core.Dense at 0x1f0c7b5d208>,\n",
       " <tensorflow.python.keras.layers.core.Dense at 0x1f0c778d508>,\n",
       " <tensorflow.python.keras.layers.core.Dense at 0x1f0c79bd408>]"
      ]
     },
     "execution_count": 121,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model.layers"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 122,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "sequential\n",
      "sequential\n"
     ]
    }
   ],
   "source": [
    "print(model.name)\n",
    "print(model.name_scope())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "tf2",
   "language": "python",
   "name": "tf2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  },
  "varInspector": {
   "cols": {
    "lenName": 16,
    "lenType": 16,
    "lenVar": 40
   },
   "kernels_config": {
    "python": {
     "delete_cmd_postfix": "",
     "delete_cmd_prefix": "del ",
     "library": "var_list.py",
     "varRefreshCmd": "print(var_dic_list())"
    },
    "r": {
     "delete_cmd_postfix": ") ",
     "delete_cmd_prefix": "rm(",
     "library": "var_list.r",
     "varRefreshCmd": "cat(var_dic_list()) "
    }
   },
   "types_to_exclude": [
    "module",
    "function",
    "builtin_function_or_method",
    "instance",
    "_Feature"
   ],
   "window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
