{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(60000, 28, 28)\n",
      "(60000,)\n",
      "(10000, 28, 28)\n",
      "(10000,)\n"
     ]
    }
   ],
   "source": [
    "import tensorflow as tf\n",
    "\n",
    "# Fashion-MNIST: 60k training / 10k test images, 28x28 grayscale, 10 classes.\n",
    "fashion=tf.keras.datasets.fashion_mnist\n",
    "\n",
    "(x_train, y_train), (x_test, y_test)=fashion.load_data()\n",
    "\n",
    "# Sanity-check array shapes before building the input pipeline.\n",
    "print (x_train.shape)\n",
    "print (y_train.shape)\n",
    "\n",
    "print (x_test.shape)\n",
    "print (y_test.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "10"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Number of distinct class labels (10 for Fashion-MNIST).\n",
    "len(set(y_train))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[  0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0\n",
      "    0   0   0   0   0   0   0   0   0   0]\n",
      " [  0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0\n",
      "    0   0   0   0   0   0   0   0   0   0]\n",
      " [  0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0\n",
      "    0   0   0   0   0   0   0   0   0   0]\n",
      " [  0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0\n",
      "    0   0   0   0   0   0   0   0   0   0]\n",
      " [  0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0\n",
      "    0   0   0   0   0   0   0   0   0   0]\n",
      " [  0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0\n",
      "    0   0   0   0   0   0   0   0   0   0]\n",
      " [  0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0\n",
      "    0   0   0   0   0   0   0   0   0   0]\n",
      " [  0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0\n",
      "    0   3   1   0   0   7   0  37   0   0]\n",
      " [  0   0   0   0   0   0   0   0   0   0   0   0   0   1   2   0  27  84\n",
      "   11   0   0   0   0   0   0 119   0   0]\n",
      " [  0   0   0   0   0   0   0   0   0   0   0   0   0   1   0   0  88 143\n",
      "  110   0   0   0   0  22  93 106   0   0]\n",
      " [  0   0   0   0   0   0   0   0   0   0   0   0   0   4   0  53 129 120\n",
      "  147 175 157 166 135 154 168 140   0   0]\n",
      " [  0   0   0   0   0   0   0   0   0   0   0   0   2   0  11 137 130 128\n",
      "  160 176 159 167 178 149 151 144   0   0]\n",
      " [  0   0   0   0   0   0   1   0   2   1   0   3   0   0 115 114 106 137\n",
      "  168 153 156 165 167 143 157 158  11   0]\n",
      " [  0   0   0   0   1   0   0   0   0   0   3   0   0  89 139  90  94 153\n",
      "  149 131 151 169 172 143 159 169  48   0]\n",
      " [  0   0   0   0   0   0   2   4   1   0   0   0  98 136 110 109 110 162\n",
      "  135 144 149 159 167 144 158 169 119   0]\n",
      " [  0   0   2   2   1   2   0   0   0   0  26 108 117  99 111 117 136 156\n",
      "  134 154 154 156 160 141 147 156 178   0]\n",
      " [  3   0   0   0   0   0   0  21  53  92 117 111 103 115 129 134 143 154\n",
      "  165 170 154 151 154 143 138 150 165  43]\n",
      " [  0   0  23  54  65  76  85 118 128 123 111 113 118 127 125 139 133 136\n",
      "  160 140 155 161 144 155 172 161 189  62]\n",
      " [  0  68  94  90 111 114 111 114 115 127 135 136 143 126 127 151 154 143\n",
      "  148 125 162 162 144 138 153 162 196  58]\n",
      " [ 70 169 129 104  98 100  94  97  98 102 108 106 119 120 129 149 156 167\n",
      "  190 190 196 198 198 187 197 189 184  36]\n",
      " [ 16 126 171 188 188 184 171 153 135 120 126 127 146 185 195 209 208 255\n",
      "  209 177 245 252 251 251 247 220 206  49]\n",
      " [  0   0   0  12  67 106 164 185 199 210 211 210 208 190 150  82   8   0\n",
      "    0   0 178 208 188 175 162 158 151  11]\n",
      " [  0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0\n",
      "    0   0   0   0   0   0   0   0   0   0]\n",
      " [  0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0\n",
      "    0   0   0   0   0   0   0   0   0   0]\n",
      " [  0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0\n",
      "    0   0   0   0   0   0   0   0   0   0]\n",
      " [  0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0\n",
      "    0   0   0   0   0   0   0   0   0   0]\n",
      " [  0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0\n",
      "    0   0   0   0   0   0   0   0   0   0]\n",
      " [  0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0\n",
      "    0   0   0   0   0   0   0   0   0   0]]\n"
     ]
    }
   ],
   "source": [
    "# Inspect one raw test image: 28x28 uint8 pixel intensities in [0, 255].\n",
    "print(x_test[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Training hyper-parameters and dataset constants.\n",
    "BATCH_SIZE=128\n",
    "EPOCH=50\n",
    "cls_num=len(set(y_train))  # number of target classes (10)\n",
    "image_shape=(28,28)  # height x width of a single input image"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 67,
   "metadata": {},
   "outputs": [],
   "source": [
    "def create_model(features,feature_columns,output_cls):\n",
    "    '''Build the CNN graph: feature-column input -> conv/pool -> conv -> dense -> logits.\n",
    "\n",
    "    Returns (logits, denlayer): the logits tensor and the L2-regularized dense\n",
    "    layer, so the caller can collect its regularization losses.\n",
    "    '''\n",
    "    input_layer=tf.feature_column.input_layer(features,feature_columns)\n",
    "    \n",
    "    # Feature columns flatten the image; restore NHWC shape for the conv stack.\n",
    "    net = tf.reshape(input_layer, [-1, 28, 28, 1]) \n",
    "    l2=tf.keras.regularizers.l2(l=0.01)\n",
    "#     tf.layers.conv2d is deprecated; use tf.keras.layers.Conv2D instead\n",
    "    conv1=tf.keras.layers.Conv2D(filters=16,kernel_size=[3,3],activation='relu',name='conv1')(net)\n",
    "    pool1=tf.keras.layers.MaxPool2D(pool_size=(3,3))(conv1)\n",
    "    conv2=tf.keras.layers.Conv2D(filters=8,kernel_size=[5,5],activation='relu',name='conv2')(pool1)\n",
    "    flat=tf.keras.layers.Flatten()(conv2)\n",
    "    denlayer=tf.keras.layers.Dense(units=128,activation='relu',name='dense1',kernel_regularizer=l2)\n",
    "    dens1=denlayer(flat)\n",
    "    logits=tf.keras.layers.Dense(units=output_cls,name='dense_output')(dens1)\n",
    "    return logits,denlayer\n",
    "def  model_fn_builder(lr):\n",
    "    # Factory producing the Estimator's model_fn; extra setup (e.g. closing\n",
    "    # over the learning rate) can be performed here.\n",
    "    def model_fn(features, labels, mode, params,config): # model_fn required by Estimator -- this signature is fixed\n",
    "        '''\n",
    "        features: returned from input_fn (mind the return order)\n",
    "        labels: returned from input_fn (mind the return order)\n",
    "        mode: one of tf.estimator.ModeKeys\n",
    "        params: dict of parameters passed when constructing the estimator\n",
    "        config: the RunConfig used when constructing the estimator\n",
    "\n",
    "        '''\n",
    "        logits,dens1=create_model(features,params['feature_columns'],params['output_cls'])\n",
    "        \n",
    "        pre_cls=tf.math.argmax(input=logits,axis=1)\n",
    "        pre_prob=tf.nn.softmax(logits=logits,axis=1,name='pre_prob')\n",
    "        \n",
    "        is_predict=mode==tf.estimator.ModeKeys.PREDICT\n",
    "        if not is_predict:\n",
    "            # TRAIN / EVAL\n",
    "            loss=tf.losses.sparse_softmax_cross_entropy(labels=tf.cast(labels,tf.int32),logits=logits)\n",
    "            # BUG FIX: dens1.losses is a (possibly empty) *list* of tensors.\n",
    "            # Adding the raw list to the scalar loss produced a shape-(0,)\n",
    "            # tensor, which EstimatorSpec rejects ('Loss must be scalar').\n",
    "            # Reduce the list to a scalar before adding it.\n",
    "            reg_loss=tf.add_n(dens1.losses) if dens1.losses else tf.constant(0.0)\n",
    "            loss=loss+reg_loss\n",
    "            tf.summary.scalar('loss1',reg_loss)\n",
    "\n",
    "            def metric_fn(labels,predictions):\n",
    "                '''\n",
    "                define metrics\n",
    "                '''\n",
    "                accuracy,accuracy_update=tf.metrics.accuracy(labels=labels,predictions=predictions,name='image_accuracy')\n",
    "                recall,recall_update=tf.metrics.recall(labels=labels,predictions=predictions,name='image_recall')\n",
    "                precision,precision_update=tf.metrics.precision(labels=labels,predictions=predictions,name='image_precision')\n",
    "\n",
    "                return {\n",
    "                    'accuracy':(accuracy,accuracy_update),\n",
    "                    'recall':(recall,recall_update),\n",
    "                    'precision':(precision,precision_update)\n",
    "                }\n",
    "            metrics=metric_fn(labels,pre_cls)\n",
    "\n",
    "            if mode==tf.estimator.ModeKeys.EVAL:\n",
    "                return tf.estimator.EstimatorSpec(mode=mode,loss=loss,eval_metric_ops=metrics)\n",
    "\n",
    "            # TRAIN: single Adam step, advancing the global step.\n",
    "            train_op=tf.train.AdamOptimizer(learning_rate=lr).minimize(loss=loss,global_step=tf.train.get_global_step())\n",
    "            return tf.estimator.EstimatorSpec(mode=mode,loss=loss,train_op=train_op,eval_metric_ops=metrics)\n",
    "\n",
    "        else:\n",
    "            # PREDICT: return class ids and class probabilities.\n",
    "            predictions={'predict_cls':pre_cls,'predict_pro':pre_prob}\n",
    "            return tf.estimator.EstimatorSpec(mode=mode,predictions=predictions)\n",
    "    return model_fn"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 68,
   "metadata": {},
   "outputs": [],
   "source": [
    "def input_fn_builder(x,y,batch_size,epochs,is_train=True):\n",
    "    '''\n",
    "    Build the input-function closure required by the Estimator.\n",
    "    '''\n",
    "    # additional setup could be performed here\n",
    "    \n",
    "    def input_fn():\n",
    "        dataset=tf.data.Dataset.from_tensor_slices(({'images':x},y) )  \n",
    "        if is_train:\n",
    "            dataset=dataset.shuffle(1000).repeat(epochs)\n",
    "        dataset=dataset.batch(batch_size)\n",
    "        return dataset # return order must match model_fn; dataset elements as (features, label) tuples also work\n",
    "    return input_fn"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 69,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): hardcoded absolute Windows path -- make configurable before reuse.\n",
    "model_dir=r'F:\\testDemo\\AI\\estimator\\model\\fashion1'\n",
    "params={}\n",
    "# Single numeric feature column holding the whole 28x28 image.\n",
    "feature_columns=[tf.feature_column.numeric_column('images',shape=image_shape)]\n",
    "\n",
    "output_cls=cls_num\n",
    "params['feature_columns']=feature_columns\n",
    "params['output_cls']=output_cls\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 70,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[NumericColumn(key='images', shape=(28, 28), default_value=None, dtype=tf.float32, normalizer_fn=None)]"
      ]
     },
     "execution_count": 70,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Display the feature-column spec (rich repr of the NumericColumn list).\n",
    "feature_columns"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 71,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Using config: {'_model_dir': 'F:\\\\testDemo\\\\AI\\\\estimator\\\\model\\\\fashion1', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': 100, '_save_checkpoints_secs': None, '_session_config': allow_soft_placement: true\n",
      "graph_options {\n",
      "  rewrite_options {\n",
      "    meta_optimizer_iterations: ONE\n",
      "  }\n",
      "}\n",
      ", '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_service': None, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x0000029A0542C6D8>, '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}\n"
     ]
    }
   ],
   "source": [
    "tf.summary.FileWriterCache.clear()  # only needed if model_dir was deleted and recreated (same path), leaving no events file\n",
    "config=tf.estimator.RunConfig(save_checkpoints_steps=100)\n",
    "\n",
    "estimator=tf.estimator.Estimator(model_fn=model_fn_builder(0.001),model_dir=model_dir,params=params,config=config)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 72,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Log the softmax tensor (named 'pre_prob' in model_fn) every 50 steps.\n",
    "tensors_to_log = {\"test_hook\": \"pre_prob\"}\n",
    "\n",
    "logging_hook = tf.train.LoggingTensorHook(\n",
    "    tensors=tensors_to_log, every_n_iter=50)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 73,
   "metadata": {},
   "outputs": [],
   "source": [
    "class testclsHook(tf.train.SessionRunHook):\n",
    "    # Demo hook that prints a marker at each stage of the session lifecycle.\n",
    "    \n",
    "    def begin(self):\n",
    "        \"\"\"Called once before the session is created.\n",
    "        When begin() is called the default graph already exists and new ops\n",
    "        may still be added to it; once begin() returns, the default graph\n",
    "        can no longer be modified.\n",
    "        \"\"\"\n",
    "        print(\"first\")\n",
    "        pass\n",
    "\n",
    "    def after_create_session(self, session, coord):  # pylint: disable=unused-argument\n",
    "        print(\"2end\")\n",
    "      \n",
    "        pass\n",
    "\n",
    "    def before_run(self, run_context):  # pylint: disable=unused-argument\n",
    "        print(\"third\")\n",
    "   \n",
    "        return None\n",
    "    def after_run(self,\n",
    "                run_context,  # pylint: disable=unused-argument\n",
    "                run_values):  # pylint: disable=unused-argument\n",
    "        \"\"\"Called after each sess.run().\n",
    "        run_values holds the results of the ops/tensors requested in\n",
    "        before_run(); run_context.request_stop() may be called here to stop\n",
    "        the iteration. If sess.run() raises any exception, after_run() is\n",
    "        not called.\n",
    "        Args:\n",
    "          run_context: A `SessionRunContext` object.\n",
    "          run_values: A SessionRunValues object.\n",
    "        \"\"\"\n",
    "        print(\"4th\")\n",
    "        pass\n",
    "\n",
    "    def end(self, session):  # pylint: disable=unused-argument\n",
    "        \"\"\"Called at the end of the session.\n",
    "        end() is typically used for final work such as saving a last\n",
    "        checkpoint. If sess.run() raised anything other than the\n",
    "        OutOfRange/StopIteration that signals end of input, end() is not\n",
    "        called.\n",
    "        Args:\n",
    "          session: A TensorFlow Session that will be soon closed.\n",
    "        \"\"\"\n",
    "        print(\"5th\")\n",
    "        pass\n",
    "\n",
    "    \n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 74,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Calling model_fn.\n"
     ]
    },
    {
     "ename": "ValueError",
     "evalue": "Loss must be scalar, given: Tensor(\"add:0\", shape=(0,), dtype=float32)",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mValueError\u001b[0m                                Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-74-30d55edbfb98>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mtrain\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mestimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtrain\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minput_fn\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0minput_fn_builder\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mx_train\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0my\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0my_train\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mbatch_size\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mBATCH_SIZE\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mepochs\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mEPOCH\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mis_train\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mTrue\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0msteps\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m10000\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mhooks\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mlogging_hook\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mtestclsHook\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[1;32mE:\\Python\\virtualenv\\NER\\lib\\site-packages\\tensorflow_estimator\\python\\estimator\\estimator.py\u001b[0m in \u001b[0;36mtrain\u001b[1;34m(self, input_fn, hooks, steps, max_steps, saving_listeners)\u001b[0m\n\u001b[0;32m    356\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    357\u001b[0m       \u001b[0msaving_listeners\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0m_check_listeners_type\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0msaving_listeners\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 358\u001b[1;33m       \u001b[0mloss\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_train_model\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minput_fn\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mhooks\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0msaving_listeners\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    359\u001b[0m       \u001b[0mlogging\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0minfo\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'Loss for final step: %s.'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mloss\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    360\u001b[0m       \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mE:\\Python\\virtualenv\\NER\\lib\\site-packages\\tensorflow_estimator\\python\\estimator\\estimator.py\u001b[0m in \u001b[0;36m_train_model\u001b[1;34m(self, input_fn, hooks, saving_listeners)\u001b[0m\n\u001b[0;32m   1122\u001b[0m       \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_train_model_distributed\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minput_fn\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mhooks\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0msaving_listeners\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1123\u001b[0m     \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1124\u001b[1;33m       \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_train_model_default\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minput_fn\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mhooks\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0msaving_listeners\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   1125\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1126\u001b[0m   \u001b[1;32mdef\u001b[0m \u001b[0m_train_model_default\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minput_fn\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mhooks\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0msaving_listeners\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mE:\\Python\\virtualenv\\NER\\lib\\site-packages\\tensorflow_estimator\\python\\estimator\\estimator.py\u001b[0m in \u001b[0;36m_train_model_default\u001b[1;34m(self, input_fn, hooks, saving_listeners)\u001b[0m\n\u001b[0;32m   1152\u001b[0m       \u001b[0mworker_hooks\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mextend\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minput_hooks\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1153\u001b[0m       estimator_spec = self._call_model_fn(\n\u001b[1;32m-> 1154\u001b[1;33m           features, labels, model_fn_lib.ModeKeys.TRAIN, self.config)\n\u001b[0m\u001b[0;32m   1155\u001b[0m       \u001b[0mglobal_step_tensor\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtraining_util\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mget_global_step\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mg\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1156\u001b[0m       return self._train_with_estimator_spec(estimator_spec, worker_hooks,\n",
      "\u001b[1;32mE:\\Python\\virtualenv\\NER\\lib\\site-packages\\tensorflow_estimator\\python\\estimator\\estimator.py\u001b[0m in \u001b[0;36m_call_model_fn\u001b[1;34m(self, features, labels, mode, config)\u001b[0m\n\u001b[0;32m   1110\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1111\u001b[0m     \u001b[0mlogging\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0minfo\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'Calling model_fn.'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1112\u001b[1;33m     \u001b[0mmodel_fn_results\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_model_fn\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfeatures\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mfeatures\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   1113\u001b[0m     \u001b[0mlogging\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0minfo\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'Done calling model_fn.'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1114\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m<ipython-input-67-8d37cc57f431>\u001b[0m in \u001b[0;36mmodel_fn\u001b[1;34m(features, labels, mode, params, config)\u001b[0m\n\u001b[0;32m     63\u001b[0m             \u001b[1;31m# train process\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     64\u001b[0m             \u001b[0mtrain_op\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mtf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtrain\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mAdamOptimizer\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mlearning_rate\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mlr\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mminimize\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mloss\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mloss\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mglobal_step\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mtf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtrain\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mget_global_step\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 65\u001b[1;33m             \u001b[1;32mreturn\u001b[0m \u001b[0mtf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mEstimatorSpec\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmode\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mmode\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mloss\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mloss\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mtrain_op\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mtrain_op\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0meval_metric_ops\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mmetrics\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     66\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     67\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mE:\\Python\\virtualenv\\NER\\lib\\site-packages\\tensorflow_estimator\\python\\estimator\\model_fn.py\u001b[0m in \u001b[0;36m__new__\u001b[1;34m(cls, mode, predictions, loss, train_op, eval_metric_ops, export_outputs, training_chief_hooks, training_hooks, scaffold, evaluation_hooks, prediction_hooks)\u001b[0m\n\u001b[0;32m    184\u001b[0m     \"\"\"\n\u001b[0;32m    185\u001b[0m     \u001b[0mtrain_op\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0m_validate_estimator_spec_train_op\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtrain_op\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 186\u001b[1;33m     \u001b[0mloss\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0m_validate_estimator_spec_loss\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mloss\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    187\u001b[0m     \u001b[0mpredictions\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0m_validate_estimator_spec_predictions\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mpredictions\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    188\u001b[0m     export_outputs = _validate_estimator_spec_export_outputs(\n",
      "\u001b[1;32mE:\\Python\\virtualenv\\NER\\lib\\site-packages\\tensorflow_estimator\\python\\estimator\\model_fn.py\u001b[0m in \u001b[0;36m_validate_estimator_spec_loss\u001b[1;34m(loss, mode)\u001b[0m\n\u001b[0;32m    417\u001b[0m     \u001b[0mloss_shape\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mloss\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mget_shape\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    418\u001b[0m     \u001b[1;32mif\u001b[0m \u001b[0mloss_shape\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mnum_elements\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[1;32min\u001b[0m \u001b[1;33m(\u001b[0m\u001b[1;32mNone\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 419\u001b[1;33m       \u001b[1;32mraise\u001b[0m \u001b[0mValueError\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'Loss must be scalar, given: {}'\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mloss\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    420\u001b[0m     \u001b[1;32mif\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[0mloss_shape\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mis_compatible_with\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtensor_shape\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mscalar\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    421\u001b[0m       \u001b[0mloss\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0marray_ops\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mloss\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mValueError\u001b[0m: Loss must be scalar, given: Tensor(\"add:0\", shape=(0,), dtype=float32)"
     ]
    }
   ],
   "source": [
    "# Train for 10k steps with the logging and lifecycle-demo hooks attached.\n",
    "# NOTE(review): the recorded run raised 'Loss must be scalar' -- caused by\n",
    "# adding the raw dens1.losses list to the scalar loss inside model_fn; the\n",
    "# regularization losses must be summed (tf.add_n) before being added.\n",
    "train=estimator.train(input_fn=input_fn_builder(x=x_train,y=y_train,batch_size=BATCH_SIZE,epochs=EPOCH,is_train=True),steps=10000,hooks=[logging_hook,testclsHook()])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Help on Estimator in module tensorflow_estimator.python.estimator.estimator object:\n",
      "\n",
      "class Estimator(EstimatorV2)\n",
      " |  Estimator class to train and evaluate TensorFlow models.\n",
      " |  \n",
      " |  The `Estimator` object wraps a model which is specified by a `model_fn`,\n",
      " |  which, given inputs and a number of other parameters, returns the ops\n",
      " |  necessary to perform training, evaluation, or predictions.\n",
      " |  \n",
      " |  All outputs (checkpoints, event files, etc.) are written to `model_dir`, or a\n",
      " |  subdirectory thereof. If `model_dir` is not set, a temporary directory is\n",
      " |  used.\n",
      " |  \n",
      " |  The `config` argument can be passed `tf.estimator.RunConfig` object containing\n",
      " |  information about the execution environment. It is passed on to the\n",
      " |  `model_fn`, if the `model_fn` has a parameter named \"config\" (and input\n",
      " |  functions in the same manner). If the `config` parameter is not passed, it is\n",
      " |  instantiated by the `Estimator`. Not passing config means that defaults useful\n",
      " |  for local execution are used. `Estimator` makes config available to the model\n",
      " |  (for instance, to allow specialization based on the number of workers\n",
      " |  available), and also uses some of its fields to control internals, especially\n",
      " |  regarding checkpointing.\n",
      " |  \n",
      " |  The `params` argument contains hyperparameters. It is passed to the\n",
      " |  `model_fn`, if the `model_fn` has a parameter named \"params\", and to the input\n",
      " |  functions in the same manner. `Estimator` only passes params along, it does\n",
      " |  not inspect it. The structure of `params` is therefore entirely up to the\n",
      " |  developer.\n",
      " |  \n",
      " |  None of `Estimator`'s methods can be overridden in subclasses (its\n",
      " |  constructor enforces this). Subclasses should use `model_fn` to configure\n",
      " |  the base class, and may add methods implementing specialized functionality.\n",
      " |  \n",
      " |  @compatibility(eager)\n",
      " |  Calling methods of `Estimator` will work while eager execution is enabled.\n",
      " |  However, the `model_fn` and `input_fn` is not executed eagerly, `Estimator`\n",
      " |  will switch to graph mode before calling all user-provided functions (incl.\n",
      " |  hooks), so their code has to be compatible with graph mode execution. Note\n",
      " |  that `input_fn` code using `tf.data` generally works in both graph and eager\n",
      " |  modes.\n",
      " |  @end_compatibility\n",
      " |  \n",
      " |  Method resolution order:\n",
      " |      Estimator\n",
      " |      EstimatorV2\n",
      " |      builtins.object\n",
      " |  \n",
      " |  Methods defined here:\n",
      " |  \n",
      " |  export_saved_model(self, export_dir_base, serving_input_receiver_fn, assets_extra=None, as_text=False, checkpoint_path=None, experimental_mode='infer')\n",
      " |      Exports inference graph as a `SavedModel` into the given dir.\n",
      " |      \n",
      " |      For a detailed guide, see\n",
      " |      [Using SavedModel with Estimators](https://tensorflow.org/guide/saved_model#using_savedmodel_with_estimators).\n",
      " |      \n",
      " |      This method builds a new graph by first calling the\n",
      " |      `serving_input_receiver_fn` to obtain feature `Tensor`s, and then calling\n",
      " |      this `Estimator`'s `model_fn` to generate the model graph based on those\n",
      " |      features. It restores the given checkpoint (or, lacking that, the most\n",
      " |      recent checkpoint) into this graph in a fresh session.  Finally it creates\n",
      " |      a timestamped export directory below the given `export_dir_base`, and writes\n",
      " |      a `SavedModel` into it containing a single `tf.MetaGraphDef` saved from this\n",
      " |      session.\n",
      " |      \n",
      " |      The exported `MetaGraphDef` will provide one `SignatureDef` for each\n",
      " |      element of the `export_outputs` dict returned from the `model_fn`, named\n",
      " |      using\n",
      " |      the same keys.  One of these keys is always\n",
      " |      `tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY`,\n",
      " |      indicating which\n",
      " |      signature will be served when a serving request does not specify one.\n",
      " |      For each signature, the outputs are provided by the corresponding\n",
      " |      `tf.estimator.export.ExportOutput`s, and the inputs are always the input\n",
      " |      receivers provided by\n",
      " |      the `serving_input_receiver_fn`.\n",
      " |      \n",
      " |      Extra assets may be written into the `SavedModel` via the `assets_extra`\n",
      " |      argument.  This should be a dict, where each key gives a destination path\n",
      " |      (including the filename) relative to the assets.extra directory.  The\n",
      " |      corresponding value gives the full path of the source file to be copied.\n",
      " |      For example, the simple case of copying a single file without renaming it\n",
      " |      is specified as `{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.\n",
      " |      \n",
      " |      The experimental_mode parameter can be used to export a single\n",
      " |      train/eval/predict graph as a `SavedModel`.\n",
      " |      See `experimental_export_all_saved_models` for full docs.\n",
      " |      \n",
      " |      Args:\n",
      " |        export_dir_base: A string containing a directory in which to create\n",
      " |          timestamped subdirectories containing exported `SavedModel`s.\n",
      " |        serving_input_receiver_fn: A function that takes no argument and returns a\n",
      " |          `tf.estimator.export.ServingInputReceiver` or\n",
      " |          `tf.estimator.export.TensorServingInputReceiver`.\n",
      " |        assets_extra: A dict specifying how to populate the assets.extra directory\n",
      " |          within the exported `SavedModel`, or `None` if no extra assets are\n",
      " |          needed.\n",
      " |        as_text: whether to write the `SavedModel` proto in text format.\n",
      " |        checkpoint_path: The checkpoint path to export.  If `None` (the default),\n",
      " |          the most recent checkpoint found within the model directory is chosen.\n",
      " |        experimental_mode: `tf.estimator.ModeKeys` value indicating with mode\n",
      " |          will be exported. Note that this feature is experimental.\n",
      " |      \n",
      " |      Returns:\n",
      " |        The string path to the exported directory.\n",
      " |      \n",
      " |      Raises:\n",
      " |        ValueError: if no `serving_input_receiver_fn` is provided, no\n",
      " |        `export_outputs` are provided, or no checkpoint can be found.\n",
      " |  \n",
      " |  export_savedmodel(self, export_dir_base, serving_input_receiver_fn, assets_extra=None, as_text=False, checkpoint_path=None, strip_default_attrs=False)\n",
      " |      Exports inference graph as a `SavedModel` into the given dir.\n",
      " |      \n",
      " |      For a detailed guide, see\n",
      " |      [Using SavedModel with Estimators](https://tensorflow.org/guide/saved_model#using_savedmodel_with_estimators).\n",
      " |      \n",
      " |      This method builds a new graph by first calling the\n",
      " |      `serving_input_receiver_fn` to obtain feature `Tensor`s, and then calling\n",
      " |      this `Estimator`'s `model_fn` to generate the model graph based on those\n",
      " |      features. It restores the given checkpoint (or, lacking that, the most\n",
      " |      recent checkpoint) into this graph in a fresh session.  Finally it creates\n",
      " |      a timestamped export directory below the given `export_dir_base`, and writes\n",
      " |      a `SavedModel` into it containing a single `tf.MetaGraphDef` saved from this\n",
      " |      session.\n",
      " |      \n",
      " |      The exported `MetaGraphDef` will provide one `SignatureDef` for each\n",
      " |      element of the `export_outputs` dict returned from the `model_fn`, named\n",
      " |      using\n",
      " |      the same keys.  One of these keys is always\n",
      " |      `tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY`,\n",
      " |      indicating which\n",
      " |      signature will be served when a serving request does not specify one.\n",
      " |      For each signature, the outputs are provided by the corresponding\n",
      " |      `tf.estimator.export.ExportOutput`s, and the inputs are always the input\n",
      " |      receivers provided by\n",
      " |      the `serving_input_receiver_fn`.\n",
      " |      \n",
      " |      Extra assets may be written into the `SavedModel` via the `assets_extra`\n",
      " |      argument.  This should be a dict, where each key gives a destination path\n",
      " |      (including the filename) relative to the assets.extra directory.  The\n",
      " |      corresponding value gives the full path of the source file to be copied.\n",
      " |      For example, the simple case of copying a single file without renaming it\n",
      " |      is specified as `{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.\n",
      " |      \n",
      " |      Args:\n",
      " |        export_dir_base: A string containing a directory in which to create\n",
      " |          timestamped subdirectories containing exported `SavedModel`s.\n",
      " |        serving_input_receiver_fn: A function that takes no argument and returns a\n",
      " |          `tf.estimator.export.ServingInputReceiver` or\n",
      " |          `tf.estimator.export.TensorServingInputReceiver`.\n",
      " |        assets_extra: A dict specifying how to populate the assets.extra directory\n",
      " |          within the exported `SavedModel`, or `None` if no extra assets are\n",
      " |          needed.\n",
      " |        as_text: whether to write the `SavedModel` proto in text format.\n",
      " |        checkpoint_path: The checkpoint path to export.  If `None` (the default),\n",
      " |          the most recent checkpoint found within the model directory is chosen.\n",
      " |        strip_default_attrs: Boolean. If `True`, default-valued attributes will be\n",
      " |          removed from the `NodeDef`s. For a detailed guide, see [Stripping\n",
      " |          Default-Valued Attributes](\n",
      " |          https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).\n",
      " |      \n",
      " |      Returns:\n",
      " |        The string path to the exported directory.\n",
      " |      \n",
      " |      Raises:\n",
      " |        ValueError: if no `serving_input_receiver_fn` is provided, no\n",
      " |        `export_outputs` are provided, or no checkpoint can be found.\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Methods inherited from EstimatorV2:\n",
      " |  \n",
      " |  __init__(self, model_fn, model_dir=None, config=None, params=None, warm_start_from=None)\n",
      " |      Constructs an `Estimator` instance.\n",
      " |      \n",
      " |      See [estimators](https://tensorflow.org/guide/estimators) for more\n",
      " |      information.\n",
      " |      \n",
      " |      To warm-start an `Estimator`:\n",
      " |      \n",
      " |      ```python\n",
      " |      estimator = tf.estimator.DNNClassifier(\n",
      " |          feature_columns=[categorical_feature_a_emb, categorical_feature_b_emb],\n",
      " |          hidden_units=[1024, 512, 256],\n",
      " |          warm_start_from=\"/path/to/checkpoint/dir\")\n",
      " |      ```\n",
      " |      \n",
      " |      For more details on warm-start configuration, see\n",
      " |      `tf.estimator.WarmStartSettings`.\n",
      " |      \n",
      " |      Args:\n",
      " |        model_fn: Model function. Follows the signature:\n",
      " |      \n",
      " |          * Args:\n",
      " |      \n",
      " |            * `features`: This is the first item returned from the `input_fn`\n",
      " |                   passed to `train`, `evaluate`, and `predict`. This should be a\n",
      " |                   single `tf.Tensor` or `dict` of same.\n",
      " |            * `labels`: This is the second item returned from the `input_fn`\n",
      " |                   passed to `train`, `evaluate`, and `predict`. This should be a\n",
      " |                   single `tf.Tensor` or `dict` of same (for multi-head models).\n",
      " |                   If mode is `tf.estimator.ModeKeys.PREDICT`, `labels=None` will\n",
      " |                   be passed. If the `model_fn`'s signature does not accept\n",
      " |                   `mode`, the `model_fn` must still be able to handle\n",
      " |                   `labels=None`.\n",
      " |            * `mode`: Optional. Specifies if this training, evaluation or\n",
      " |                   prediction. See `tf.estimator.ModeKeys`.\n",
      " |            * `params`: Optional `dict` of hyperparameters.  Will receive what\n",
      " |                   is passed to Estimator in `params` parameter. This allows\n",
      " |                   to configure Estimators from hyper parameter tuning.\n",
      " |            * `config`: Optional `estimator.RunConfig` object. Will receive what\n",
      " |                   is passed to Estimator as its `config` parameter, or a default\n",
      " |                   value. Allows setting up things in your `model_fn` based on\n",
      " |                   configuration such as `num_ps_replicas`, or `model_dir`.\n",
      " |      \n",
      " |          * Returns:\n",
      " |            `tf.estimator.EstimatorSpec`\n",
      " |      \n",
      " |        model_dir: Directory to save model parameters, graph and etc. This can\n",
      " |          also be used to load checkpoints from the directory into an estimator to\n",
      " |          continue training a previously saved model. If `PathLike` object, the\n",
      " |          path will be resolved. If `None`, the model_dir in `config` will be used\n",
      " |          if set. If both are set, they must be same. If both are `None`, a\n",
      " |          temporary directory will be used.\n",
      " |        config: `estimator.RunConfig` configuration object.\n",
      " |        params: `dict` of hyper parameters that will be passed into `model_fn`.\n",
      " |                Keys are names of parameters, values are basic python types.\n",
      " |        warm_start_from: Optional string filepath to a checkpoint or SavedModel to\n",
      " |                         warm-start from, or a `tf.estimator.WarmStartSettings`\n",
      " |                         object to fully configure warm-starting.  If the string\n",
      " |                         filepath is provided instead of a\n",
      " |                         `tf.estimator.WarmStartSettings`, then all variables are\n",
      " |                         warm-started, and it is assumed that vocabularies\n",
      " |                         and `tf.Tensor` names are unchanged.\n",
      " |      \n",
      " |      Raises:\n",
      " |        ValueError: parameters of `model_fn` don't match `params`.\n",
      " |        ValueError: if this is called via a subclass and if that class overrides\n",
      " |          a member of `Estimator`.\n",
      " |  \n",
      " |  eval_dir(self, name=None)\n",
      " |      Shows the directory name where evaluation metrics are dumped.\n",
      " |      \n",
      " |      Args:\n",
      " |        name: Name of the evaluation if user needs to run multiple evaluations on\n",
      " |          different data sets, such as on training data vs test data. Metrics for\n",
      " |          different evaluations are saved in separate folders, and appear\n",
      " |          separately in tensorboard.\n",
      " |      \n",
      " |      Returns:\n",
      " |        A string which is the path of directory contains evaluation metrics.\n",
      " |  \n",
      " |  evaluate(self, input_fn, steps=None, hooks=None, checkpoint_path=None, name=None)\n",
      " |      Evaluates the model given evaluation data `input_fn`.\n",
      " |      \n",
      " |      For each step, calls `input_fn`, which returns one batch of data.\n",
      " |      Evaluates until:\n",
      " |      - `steps` batches are processed, or\n",
      " |      - `input_fn` raises an end-of-input exception (`tf.errors.OutOfRangeError`\n",
      " |      or\n",
      " |      `StopIteration`).\n",
      " |      \n",
      " |      Args:\n",
      " |        input_fn: A function that constructs the input data for evaluation. See\n",
      " |          [Premade Estimators](\n",
      " |          https://tensorflow.org/guide/premade#create_input_functions)\n",
      " |          for more information. The\n",
      " |          function should construct and return one of the following:  * A\n",
      " |          `tf.data.Dataset` object: Outputs of `Dataset` object must be a tuple\n",
      " |          `(features, labels)` with same constraints as below. * A tuple\n",
      " |          `(features, labels)`: Where `features` is a `tf.Tensor` or a dictionary\n",
      " |          of string feature name to `Tensor` and `labels` is a `Tensor` or a\n",
      " |          dictionary of string label name to `Tensor`. Both `features` and\n",
      " |          `labels` are consumed by `model_fn`. They should satisfy the expectation\n",
      " |          of `model_fn` from inputs.\n",
      " |        steps: Number of steps for which to evaluate model. If `None`, evaluates\n",
      " |          until `input_fn` raises an end-of-input exception.\n",
      " |        hooks: List of `tf.train.SessionRunHook` subclass instances. Used for\n",
      " |          callbacks inside the evaluation call.\n",
      " |        checkpoint_path: Path of a specific checkpoint to evaluate. If `None`, the\n",
      " |          latest checkpoint in `model_dir` is used.  If there are no checkpoints\n",
      " |          in `model_dir`, evaluation is run with newly initialized `Variables`\n",
      " |          instead of ones restored from checkpoint.\n",
      " |        name: Name of the evaluation if user needs to run multiple evaluations on\n",
      " |          different data sets, such as on training data vs test data. Metrics for\n",
      " |          different evaluations are saved in separate folders, and appear\n",
      " |          separately in tensorboard.\n",
      " |      \n",
      " |      Returns:\n",
      " |        A dict containing the evaluation metrics specified in `model_fn` keyed by\n",
      " |        name, as well as an entry `global_step` which contains the value of the\n",
      " |        global step for which this evaluation was performed. For canned\n",
      " |        estimators, the dict contains the `loss` (mean loss per mini-batch) and\n",
      " |        the `average_loss` (mean loss per sample). Canned classifiers also return\n",
      " |        the `accuracy`. Canned regressors also return the `label/mean` and the\n",
      " |        `prediction/mean`.\n",
      " |      \n",
      " |      Raises:\n",
      " |        ValueError: If `steps <= 0`.\n",
      " |        ValueError: If no model has been trained, namely `model_dir`, or the\n",
      " |          given `checkpoint_path` is empty.\n",
      " |  \n",
      " |  experimental_export_all_saved_models(self, export_dir_base, input_receiver_fn_map, assets_extra=None, as_text=False, checkpoint_path=None)\n",
      " |      Exports a `SavedModel` with `tf.MetaGraphDefs` for each requested mode.\n",
      " |      \n",
      " |      For each mode passed in via the `input_receiver_fn_map`,\n",
      " |      this method builds a new graph by calling the `input_receiver_fn` to obtain\n",
      " |      feature and label `Tensor`s. Next, this method calls the `Estimator`'s\n",
      " |      `model_fn` in the passed mode to generate the model graph based on\n",
      " |      those features and labels, and restores the given checkpoint\n",
      " |      (or, lacking that, the most recent checkpoint) into the graph.\n",
      " |      Only one of the modes is used for saving variables to the `SavedModel`\n",
      " |      (order of preference: `tf.estimator.ModeKeys.TRAIN`,\n",
      " |      `tf.estimator.ModeKeys.EVAL`, then\n",
      " |      `tf.estimator.ModeKeys.PREDICT`), such that up to three\n",
      " |      `tf.MetaGraphDefs` are saved with a single set of variables in a single\n",
      " |      `SavedModel` directory.\n",
      " |      \n",
      " |      For the variables and `tf.MetaGraphDefs`, a timestamped export directory\n",
      " |      below\n",
      " |      `export_dir_base`, and writes a `SavedModel` into it containing\n",
      " |      the `tf.MetaGraphDef` for the given mode and its associated signatures.\n",
      " |      \n",
      " |      For prediction, the exported `MetaGraphDef` will provide one `SignatureDef`\n",
      " |      for each element of the `export_outputs` dict returned from the `model_fn`,\n",
      " |      named using the same keys.  One of these keys is always\n",
      " |      `tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY`,\n",
      " |      indicating which\n",
      " |      signature will be served when a serving request does not specify one.\n",
      " |      For each signature, the outputs are provided by the corresponding\n",
      " |      `tf.estimator.export.ExportOutput`s, and the inputs are always the input\n",
      " |      receivers provided by\n",
      " |      the `serving_input_receiver_fn`.\n",
      " |      \n",
      " |      For training and evaluation, the `train_op` is stored in an extra\n",
      " |      collection,\n",
      " |      and loss, metrics, and predictions are included in a `SignatureDef` for the\n",
      " |      mode in question.\n",
      " |      \n",
      " |      Extra assets may be written into the `SavedModel` via the `assets_extra`\n",
      " |      argument.  This should be a dict, where each key gives a destination path\n",
      " |      (including the filename) relative to the assets.extra directory.  The\n",
      " |      corresponding value gives the full path of the source file to be copied.\n",
      " |      For example, the simple case of copying a single file without renaming it\n",
      " |      is specified as `{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.\n",
      " |      \n",
      " |      Args:\n",
      " |        export_dir_base: A string containing a directory in which to create\n",
      " |          timestamped subdirectories containing exported `SavedModel`s.\n",
      " |        input_receiver_fn_map: dict of `tf.estimator.ModeKeys` to\n",
      " |          `input_receiver_fn` mappings, where the `input_receiver_fn` is a\n",
      " |          function that takes no arguments and returns the appropriate subclass of\n",
      " |          `InputReceiver`.\n",
      " |        assets_extra: A dict specifying how to populate the assets.extra directory\n",
      " |          within the exported `SavedModel`, or `None` if no extra assets are\n",
      " |          needed.\n",
      " |        as_text: whether to write the `SavedModel` proto in text format.\n",
      " |        checkpoint_path: The checkpoint path to export.  If `None` (the default),\n",
      " |          the most recent checkpoint found within the model directory is chosen.\n",
      " |      \n",
      " |      Returns:\n",
      " |        The string path to the exported directory.\n",
      " |      \n",
      " |      Raises:\n",
      " |        ValueError: if any `input_receiver_fn` is `None`, no `export_outputs`\n",
      " |          are provided, or no checkpoint can be found.\n",
      " |  \n",
      " |  get_variable_names(self)\n",
      " |      Returns list of all variable names in this model.\n",
      " |      \n",
      " |      Returns:\n",
      " |        List of names.\n",
      " |      \n",
      " |      Raises:\n",
      " |        ValueError: If the `Estimator` has not produced a checkpoint yet.\n",
      " |  \n",
      " |  get_variable_value(self, name)\n",
      " |      Returns value of the variable given by name.\n",
      " |      \n",
      " |      Args:\n",
      " |        name: string or a list of string, name of the tensor.\n",
      " |      \n",
      " |      Returns:\n",
      " |        Numpy array - value of the tensor.\n",
      " |      \n",
      " |      Raises:\n",
      " |        ValueError: If the `Estimator` has not produced a checkpoint yet.\n",
      " |  \n",
      " |  latest_checkpoint(self)\n",
      " |      Finds the filename of the latest saved checkpoint file in `model_dir`.\n",
      " |      \n",
      " |      Returns:\n",
      " |        The full path to the latest checkpoint or `None` if no checkpoint was\n",
      " |        found.\n",
      " |  \n",
      " |  predict(self, input_fn, predict_keys=None, hooks=None, checkpoint_path=None, yield_single_examples=True)\n",
      " |      Yields predictions for given features.\n",
      " |      \n",
      " |      Please note that interleaving two predict outputs does not work. See:\n",
      " |      [issue/20506](\n",
      " |      https://github.com/tensorflow/tensorflow/issues/20506#issuecomment-422208517)\n",
      " |      \n",
      " |      Args:\n",
      " |        input_fn: A function that constructs the features. Prediction continues\n",
      " |          until `input_fn` raises an end-of-input exception\n",
      " |          (`tf.errors.OutOfRangeError` or `StopIteration`).\n",
      " |          See [Premade Estimators](\n",
      " |          https://tensorflow.org/guide/premade_estimators#create_input_functions)\n",
      " |          for more information. The function should construct and return one of\n",
      " |          the following:\n",
      " |      \n",
      " |            * A `tf.data.Dataset` object: Outputs of `Dataset` object must have\n",
      " |              same constraints as below.\n",
      " |            * features: A `tf.Tensor` or a dictionary of string feature name to\n",
      " |              `Tensor`. features are consumed by `model_fn`. They should satisfy\n",
      " |              the expectation of `model_fn` from inputs.\n",
      " |            * A tuple, in which case the first item is extracted as features.\n",
      " |      \n",
      " |        predict_keys: list of `str`, name of the keys to predict. It is used if\n",
      " |          the `tf.estimator.EstimatorSpec.predictions` is a `dict`. If\n",
      " |          `predict_keys` is used then rest of the predictions will be filtered\n",
      " |          from the dictionary. If `None`, returns all.\n",
      " |        hooks: List of `tf.train.SessionRunHook` subclass instances. Used for\n",
      " |          callbacks inside the prediction call.\n",
      " |        checkpoint_path: Path of a specific checkpoint to predict. If `None`, the\n",
      " |          latest checkpoint in `model_dir` is used.  If there are no checkpoints\n",
      " |          in `model_dir`, prediction is run with newly initialized `Variables`\n",
      " |          instead of ones restored from checkpoint.\n",
      " |        yield_single_examples: If `False`, yields the whole batch as returned by\n",
      " |          the `model_fn` instead of decomposing the batch into individual\n",
      " |          elements. This is useful if `model_fn` returns some tensors whose first\n",
      " |          dimension is not equal to the batch size.\n",
      " |      \n",
      " |      Yields:\n",
      " |        Evaluated values of `predictions` tensors.\n",
      " |      \n",
      " |      Raises:\n",
      " |        ValueError: Could not find a trained model in `model_dir`.\n",
      " |        ValueError: If batch length of predictions is not the same and\n",
      " |          `yield_single_examples` is `True`.\n",
      " |        ValueError: If there is a conflict between `predict_keys` and\n",
      " |          `predictions`. For example if `predict_keys` is not `None` but\n",
      " |          `tf.estimator.EstimatorSpec.predictions` is not a `dict`.\n",
      " |  \n",
      " |  train(self, input_fn, hooks=None, steps=None, max_steps=None, saving_listeners=None)\n",
      " |      Trains a model given training data `input_fn`.\n",
      " |      \n",
      " |      Args:\n",
      " |        input_fn: A function that provides input data for training as minibatches.\n",
      " |          See [Premade Estimators](\n",
      " |          https://tensorflow.org/guide/premade_estimators#create_input_functions)\n",
      " |          for more information. The function should construct and return one of\n",
      " |          the following:  * A\n",
      " |          `tf.data.Dataset` object: Outputs of `Dataset` object must be a tuple\n",
      " |          `(features, labels)` with same constraints as below. * A tuple\n",
      " |          `(features, labels)`: Where `features` is a `tf.Tensor` or a dictionary\n",
      " |          of string feature name to `Tensor` and `labels` is a `Tensor` or a\n",
      " |          dictionary of string label name to `Tensor`. Both `features` and\n",
      " |          `labels` are consumed by `model_fn`. They should satisfy the expectation\n",
      " |          of `model_fn` from inputs.\n",
      " |        hooks: List of `tf.train.SessionRunHook` subclass instances. Used for\n",
      " |          callbacks inside the training loop.\n",
      " |        steps: Number of steps for which to train the model. If `None`, train\n",
      " |          forever or train until `input_fn` generates the `tf.errors.OutOfRange`\n",
      " |          error or `StopIteration` exception. `steps` works incrementally. If you\n",
      " |          call two times `train(steps=10)` then training occurs in total 20 steps.\n",
      " |          If `OutOfRange` or `StopIteration` occurs in the middle, training stops\n",
      " |          before 20 steps. If you don't want to have incremental behavior please\n",
      " |          set `max_steps` instead. If set, `max_steps` must be `None`.\n",
      " |        max_steps: Number of total steps for which to train model. If `None`,\n",
      " |          train forever or train until `input_fn` generates the\n",
      " |          `tf.errors.OutOfRange` error or `StopIteration` exception. If set,\n",
      " |          `steps` must be `None`. If `OutOfRange` or `StopIteration` occurs in the\n",
      " |          middle, training stops before `max_steps` steps. Two calls to\n",
      " |          `train(steps=100)` means 200 training iterations. On the other hand, two\n",
      " |          calls to `train(max_steps=100)` means that the second call will not do\n",
      " |          any iteration since first call did all 100 steps.\n",
      " |        saving_listeners: list of `CheckpointSaverListener` objects. Used for\n",
      " |          callbacks that run immediately before or after checkpoint savings.\n",
      " |      \n",
      " |      Returns:\n",
      " |        `self`, for chaining.\n",
      " |      \n",
      " |      Raises:\n",
      " |        ValueError: If both `steps` and `max_steps` are not `None`.\n",
      " |        ValueError: If either `steps` or `max_steps <= 0`.\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Data descriptors inherited from EstimatorV2:\n",
      " |  \n",
      " |  __dict__\n",
      " |      dictionary for instance variables (if defined)\n",
      " |  \n",
      " |  __weakref__\n",
      " |      list of weak references to the object (if defined)\n",
      " |  \n",
      " |  config\n",
      " |  \n",
      " |  model_dir\n",
      " |  \n",
      " |  model_fn\n",
      " |      Returns the `model_fn` which is bound to `self.params`.\n",
      " |      \n",
      " |      Returns:\n",
      " |        The `model_fn` with following signature:\n",
      " |          `def model_fn(features, labels, mode, config)`\n",
      " |  \n",
      " |  params\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# NOTE(review): help() dumps the entire Estimator API docs into this cell's\n",
    "# output (hundreds of lines above) — consider clearing the output before sharing.\n",
    "help(train)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 106,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Calling model_fn.\n",
      "INFO:tensorflow:Done calling model_fn.\n",
      "INFO:tensorflow:Starting evaluation at 2019-06-06T06:50:05Z\n",
      "INFO:tensorflow:Graph was finalized.\n",
      "INFO:tensorflow:Restoring parameters from F:\\testDemo\\AI\\estimator\\model\\fashion\\model.ckpt-30000\n",
      "INFO:tensorflow:Running local_init_op.\n",
      "INFO:tensorflow:Done running local_init_op.\n",
      "INFO:tensorflow:Finished evaluation at 2019-06-06-06:50:06\n",
      "INFO:tensorflow:Saving dict for global step 30000: accuracy = 0.8644, global_step = 30000, loss = 0.7665193, precision = 0.9817285, recall = 0.9731111\n",
      "INFO:tensorflow:Saving 'checkpoint_path' summary for global step 30000: F:\\testDemo\\AI\\estimator\\model\\fashion\\model.ckpt-30000\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{'accuracy': 0.8644,\n",
       " 'loss': 0.7665193,\n",
       " 'precision': 0.9817285,\n",
       " 'recall': 0.9731111,\n",
       " 'global_step': 30000}"
      ]
     },
     "execution_count": 106,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Evaluate on the held-out test set; returns the metric dict defined in model_fn\n",
    "# (accuracy/loss/precision/recall, plus global_step — see the output below).\n",
    "estimator.evaluate(input_fn=input_fn_builder(x=x_test,y=y_test,batch_size=BATCH_SIZE,epochs=EPOCH,is_train=False))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 107,
   "metadata": {},
   "outputs": [],
   "source": [
    "def predict_fn_builder(x,batch_size):\n",
    "    '''Build an input_fn closure for Estimator.predict.\n",
    "\n",
    "    The returned callable yields batches of the feature dict\n",
    "    {'images': x} (no labels), matching what model_fn expects.\n",
    "    '''\n",
    "    def input_fn():\n",
    "        # Features only — prediction has no labels; element structure must\n",
    "        # match model_fn's `features` argument.\n",
    "        dataset = tf.data.Dataset.from_tensor_slices({'images': x})\n",
    "        return dataset.batch(batch_size)\n",
    "    return input_fn"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 108,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([9, 0, 0, 3, 0, 2, 7, 2, 5, 5], dtype=uint8)"
      ]
     },
     "execution_count": 108,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Take the first 10 training samples (and their true labels) for a prediction demo.\n",
    "pre_dataset=x_train[0:10]\n",
    "pre_y=y_train[0:10]\n",
    "pre_y"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 109,
   "metadata": {},
   "outputs": [],
   "source": [
    "# predict() returns a lazy generator — no graph execution happens until it is\n",
    "# iterated (note: this cell produces no logs; the next one does).\n",
    "predictions=estimator.predict(input_fn=predict_fn_builder(x=x_train[0:10],batch_size=10))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 110,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Calling model_fn.\n",
      "INFO:tensorflow:Done calling model_fn.\n",
      "INFO:tensorflow:Graph was finalized.\n",
      "INFO:tensorflow:Restoring parameters from F:\\testDemo\\AI\\estimator\\model\\fashion\\model.ckpt-30000\n",
      "INFO:tensorflow:Running local_init_op.\n",
      "INFO:tensorflow:Done running local_init_op.\n",
      "{'predict_cls': 9, 'predict_pro': array([0.0000000e+00, 9.7136024e-32, 4.8922935e-37, 2.4414274e-34,\n",
      "       2.6000109e-38, 5.0287287e-18, 7.1402950e-31, 8.2528209e-07,\n",
      "       3.1106856e-26, 9.9999917e-01], dtype=float32)}\n",
      "{'predict_cls': 0, 'predict_pro': array([9.9999535e-01, 4.0889493e-34, 2.9028916e-15, 6.8556294e-09,\n",
      "       3.9609319e-15, 8.1493402e-24, 4.6315395e-06, 2.0719370e-12,\n",
      "       1.2751717e-27, 3.6395057e-27], dtype=float32)}\n",
      "{'predict_cls': 0, 'predict_pro': array([9.9918240e-01, 5.6884212e-14, 6.2230316e-07, 6.4789201e-05,\n",
      "       2.0265060e-10, 3.8250029e-14, 7.5217051e-04, 1.1419887e-18,\n",
      "       4.0469545e-19, 1.0197527e-30], dtype=float32)}\n",
      "{'predict_cls': 6, 'predict_pro': array([8.4250763e-02, 3.3461257e-15, 2.9126289e-13, 2.9308584e-01,\n",
      "       1.5119933e-11, 7.4126037e-17, 6.2266308e-01, 3.2409324e-07,\n",
      "       7.5717800e-20, 7.0771971e-19], dtype=float32)}\n",
      "{'predict_cls': 0, 'predict_pro': array([9.5720196e-01, 6.1046006e-03, 1.8754016e-11, 3.5721440e-02,\n",
      "       1.1038952e-11, 2.0867729e-19, 9.7194972e-04, 3.7313208e-17,\n",
      "       5.6631888e-17, 7.1978824e-24], dtype=float32)}\n",
      "{'predict_cls': 2, 'predict_pro': array([5.3014111e-07, 3.1882837e-29, 9.9999356e-01, 5.7459650e-23,\n",
      "       6.2342070e-10, 0.0000000e+00, 5.9204581e-06, 2.9051021e-30,\n",
      "       1.1086539e-23, 1.8631410e-27], dtype=float32)}\n",
      "{'predict_cls': 7, 'predict_pro': array([6.61900606e-24, 0.00000000e+00, 0.00000000e+00, 2.71915030e-32,\n",
      "       0.00000000e+00, 2.09421813e-25, 1.35108555e-20, 1.00000000e+00,\n",
      "       1.83153775e-30, 2.03190374e-22], dtype=float32)}\n",
      "{'predict_cls': 2, 'predict_pro': array([1.0009290e-14, 3.5013583e-32, 9.9801183e-01, 3.8271048e-27,\n",
      "       1.9662476e-03, 4.1366399e-32, 2.1944619e-05, 3.1791603e-23,\n",
      "       1.1301711e-15, 1.4305268e-19], dtype=float32)}\n",
      "{'predict_cls': 5, 'predict_pro': array([0., 0., 0., 0., 0., 1., 0., 0., 0., 0.], dtype=float32)}\n",
      "{'predict_cls': 5, 'predict_pro': array([0., 0., 0., 0., 0., 1., 0., 0., 0., 0.], dtype=float32)}\n"
     ]
    }
   ],
   "source": [
    "# Iterating `predictions` triggers the checkpoint restore and runs inference;\n",
    "# each element is a dict with 'predict_cls' (argmax class) and 'predict_pro'\n",
    "# (the 10-way probability array), as shown in the output below.\n",
    "for p in predictions:\n",
    "    print(p)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Save training samples to disk as image files"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 111,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "from PIL import Image, ImageOps\n",
    "import os\n",
    "\n",
    "def save_image(filename, data_array):\n",
    "    \"\"\"Invert a grayscale uint8 array and write it to `filename`.\n",
    "\n",
    "    Args:\n",
    "        filename: destination path (extension selects the format, e.g. .jpg).\n",
    "        data_array: 2-D array of pixel values, cast to uint8 before encoding.\n",
    "    \"\"\"\n",
    "    im = Image.fromarray(data_array.astype('uint8'))\n",
    "    # Invert so the garment appears dark on a light background in the file.\n",
    "    im_invert = ImageOps.invert(im)\n",
    "    im_invert.save(filename)\n",
    "\n",
    "# Fix: create the output directory up front — the save call fails with\n",
    "# FileNotFoundError when 'data/fashion/images' does not already exist.\n",
    "out_dir = 'data/fashion/images'\n",
    "os.makedirs(out_dir, exist_ok=True)\n",
    "\n",
    "# Export the first 10 training images as 1.jpg ... 10.jpg (1-based, as before).\n",
    "for i, item in enumerate(x_train[:10], start=1):\n",
    "    save_image(filename='{}/{}.jpg'.format(out_dir, str(i)), data_array=item)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 114,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Calling model_fn.\n",
      "INFO:tensorflow:Done calling model_fn.\n",
      "INFO:tensorflow:Signatures INCLUDED in export for Classify: None\n",
      "INFO:tensorflow:Signatures INCLUDED in export for Regress: None\n",
      "INFO:tensorflow:Signatures INCLUDED in export for Predict: ['serving_default']\n",
      "INFO:tensorflow:Signatures INCLUDED in export for Train: None\n",
      "INFO:tensorflow:Signatures INCLUDED in export for Eval: None\n",
      "INFO:tensorflow:Restoring parameters from F:\\testDemo\\AI\\estimator\\model\\fashion\\model.ckpt-30000\n",
      "INFO:tensorflow:Assets added to graph.\n",
      "INFO:tensorflow:No assets to write.\n",
      "INFO:tensorflow:SavedModel written to: export_base/fashion\\temp-b'1559803909'\\saved_model.pb\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "b'export_base/fashion\\\\1559803909'"
      ]
     },
     "execution_count": 114,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "\n",
    "def serving_input_receiver_fn():\n",
    "    \"\"\"Build a ServingInputReceiver that accepts batches of JPEG-encoded bytes.\n",
    "\n",
    "    Receiver tensor: 'image_bytes' — a string placeholder of JPEG payloads.\n",
    "    Feature tensor:  'images'      — uint8 images resized to 28x28, 1 channel,\n",
    "                                     matching the Fashion-MNIST input size.\n",
    "    \"\"\"\n",
    "    def decode_and_resize(image_str_tensor):\n",
    "        \"\"\"Decodes jpeg string, resizes it and returns a uint8 tensor.\"\"\"\n",
    "        image = tf.image.decode_jpeg(image_str_tensor, channels=1) # grayscale model input, so channels=1\n",
    "        image = tf.expand_dims(image, 0)\n",
    "        image = tf.image.resize_bilinear(\n",
    "             image, [28, 28], align_corners=False)# set height/width here to match your own images\n",
    "        image = tf.squeeze(image, squeeze_dims=[0])\n",
    "        image = tf.cast(image, dtype=tf.uint8)\n",
    "        return image\n",
    "\n",
    "     # Optional; currently necessary for batch prediction.\n",
    "    key_input = tf.placeholder(tf.string, shape=[None]) \n",
    "    key_output = tf.identity(key_input)  # NOTE(review): key_output is unused below — confirm it is intentional\n",
    "\n",
    "    input_ph = tf.placeholder(tf.string, shape=[None], name='image_binary')\n",
    "    # Decode every JPEG string in the batch into a 28x28x1 uint8 image.\n",
    "    images_tensor = tf.map_fn(\n",
    "      decode_and_resize, input_ph, back_prop=False, dtype=tf.uint8)\n",
    "    receiver_tensors = {'image_bytes': input_ph}\n",
    "   \n",
    "    features = {\n",
    "       'images': images_tensor\n",
    "    }    \n",
    "    return tf.estimator.export.ServingInputReceiver(features,receiver_tensors)\n",
    "\n",
    "estimator.export_savedmodel('export_base/fashion',serving_input_receiver_fn=serving_input_receiver_fn)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Request body sent to the TF Serving REST 'predict' endpoint\n",
    "{\"instances\" : [{\"image_bytes\": {\"b64\":\"/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/wAALCAAcABwBAREA/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/9oACAEBAAA/APf6yfEuvW/hnw/d6vcjdHbpkIDguScAD6k1heHfiVomuQKZJhbS7QW3/cz35H3focV0Nrr2l3sbSQXsTKrbTk45xnofYg/jWlmvHPjzrdk2g2OjxTwTXTXnnSRI+541RGGSB0yWHXHtnnHhayiFo3t1khkAHIYg59RzxX0L8FkvP+EGkmt54JhPeySO0xO8PtVWBx15XgntivNfGupeMB4o1K2vdVv4P3zmO3ErpF5efl2qDgjGORn35rkoFWCYm4tXuEZCH2Hy2Vuu5WIOCD6gggkHrkRi0juHdIYliV+WWWcuWx0zgBf0r2X4O319o/hO8t4dMmu0bUHfzIVLLnZGMZ/CvXNR0nTtXgEGpWFreRA5CXESyAH1AI4NZUPgPwjbvvj8M6SG9TaIf5irY8LeHl6aDpY+lnH/AIVqRxRwxrHEioi8BVGAPwr/2Q==\"}},\n",
    "{\"image_bytes\": {\"b64\":\"/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/wAALCAAcABwBAREA/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/9oACAEBAAA/AO9+LPjO88KaPZQabKsN9fyMFlKhikaAbiAeM5ZRznqa8eitvEWqLFqk3iuG2luot6NdazLDM0YlMY4UH5d+QBTRqXibweltf2HiSOeC4eQRtZ37XMLum3cHVhjd868kZ5619K6FqkeuaDp+qRLtS7t0m2/3SwBI/A8fhXN+Ovh3YeNfLuLi8vYLm3iZIfJdSnPPKsD3x0x0FfP9l4r8R6XZQWdlrFxbQQAiOONI8JnJOMqT1LdT3rd8LQXvxJ8U2+meItXvZ4I45ZwyeWrrwo4wuOcLnIPAr6G0HRbXw7oltpNk0rW9upCGVtzHJJOT9Sa0a+MXbnJJOMDmvQPgy23x/D/tWsyfop/pX0dRXy5/witnO+Tc3S7iSQpT1Pqtdr8NPDltpnjG1uIri5dhFLxIUxyuOyivbq//2Q==\"}}\n",
    "]}\n",
    "# Response returned by the service\n",
    "{\n",
    "    \"predictions\": [\n",
    "        {\n",
    "            \"predict_pro\": [\n",
    "                0.0011323,\n",
    "                0.000574304,\n",
    "                0.00000434548,\n",
    "                0.0000422241,\n",
    "                1.86852e-8,\n",
    "                3.08857e-13,\n",
    "                0.00000731479,\n",
    "                5.13255e-9,\n",
    "                0.99824,\n",
    "                1.81437e-10\n",
    "            ],\n",
    "            \"predict_cls\": 8\n",
    "        },\n",
    "        {\n",
    "            \"predict_pro\": [\n",
    "                7.66782e-8,\n",
    "                4.37143e-13,\n",
    "                0.000230125,\n",
    "                3.37866e-9,\n",
    "                0.998791,\n",
    "                7.81147e-10,\n",
    "                0.000125866,\n",
    "                1.52834e-8,\n",
    "                0.00085307,\n",
    "                1.49522e-16\n",
    "            ],\n",
    "            \"predict_cls\": 4\n",
    "        }\n",
    "    ]\n",
    "}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "py3_NER",
   "language": "python",
   "name": "ner"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
