{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "heading_collapsed": true
   },
   "source": [
    "### 测试MLU的支持操作\n",
    "不支持优化器训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    " \n",
    "# Import data\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    " \n",
    "import tensorflow as tf"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "import numpy as np"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {
    "hidden": true
   },
   "outputs": [
    {
     "ename": "DuplicateFlagError",
     "evalue": "The flag 'data_dir' is defined twice. First from /home/hadoop/.conda/envs/TF_GPU/lib/python3.7/site-packages/ipykernel_launcher.py, Second from /home/hadoop/.conda/envs/TF_GPU/lib/python3.7/site-packages/ipykernel_launcher.py.  Description from first occurrence: Directory for storing data",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mDuplicateFlagError\u001b[0m                        Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-31-53a0e5585870>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m      1\u001b[0m \u001b[0mflags\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mflags\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      2\u001b[0m \u001b[0mFLAGS\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mflags\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mFLAGS\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 3\u001b[0;31m \u001b[0mflags\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDEFINE_string\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'data_dir'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'/tmp/data/'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'Directory for storing data'\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# 把数据放在/tmp/data文件夹中\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[0;32m~/.conda/envs/TF_GPU/lib/python3.7/site-packages/tensorflow/python/platform/flags.py\u001b[0m in \u001b[0;36mwrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m     56\u001b[0m           \u001b[0;34m'Use of the keyword argument names (flag_name, default_value, '\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     57\u001b[0m           'docstring) is deprecated, please use (name, default, help) instead.')\n\u001b[0;32m---> 58\u001b[0;31m     \u001b[0;32mreturn\u001b[0m \u001b[0moriginal_function\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     59\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     60\u001b[0m   \u001b[0;32mreturn\u001b[0m \u001b[0mtf_decorator\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmake_decorator\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moriginal_function\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mwrapper\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/.local/lib/python3.7/site-packages/absl/flags/_defines.py\u001b[0m in \u001b[0;36mDEFINE_string\u001b[0;34m(name, default, help, flag_values, **args)\u001b[0m\n\u001b[1;32m    239\u001b[0m   \u001b[0mparser\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_argument_parser\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mArgumentParser\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    240\u001b[0m   \u001b[0mserializer\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_argument_parser\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mArgumentSerializer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 241\u001b[0;31m   \u001b[0mDEFINE\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mparser\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdefault\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhelp\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mflag_values\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mserializer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    242\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    243\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/.local/lib/python3.7/site-packages/absl/flags/_defines.py\u001b[0m in \u001b[0;36mDEFINE\u001b[0;34m(parser, name, default, help, flag_values, serializer, module_name, **args)\u001b[0m\n\u001b[1;32m     80\u001b[0m   \"\"\"\n\u001b[1;32m     81\u001b[0m   DEFINE_flag(_flag.Flag(parser, serializer, name, default, help, **args),\n\u001b[0;32m---> 82\u001b[0;31m               flag_values, module_name)\n\u001b[0m\u001b[1;32m     83\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     84\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/.local/lib/python3.7/site-packages/absl/flags/_defines.py\u001b[0m in \u001b[0;36mDEFINE_flag\u001b[0;34m(flag, flag_values, module_name)\u001b[0m\n\u001b[1;32m    102\u001b[0m   \u001b[0;31m# Copying the reference to flag_values prevents pychecker warnings.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    103\u001b[0m   \u001b[0mfv\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mflag_values\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 104\u001b[0;31m   \u001b[0mfv\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mflag\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mflag\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    105\u001b[0m   \u001b[0;31m# Tell flag_values who's defining the flag.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    106\u001b[0m   \u001b[0;32mif\u001b[0m \u001b[0mmodule_name\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/.local/lib/python3.7/site-packages/absl/flags/_flagvalues.py\u001b[0m in \u001b[0;36m__setitem__\u001b[0;34m(self, name, flag)\u001b[0m\n\u001b[1;32m    428\u001b[0m         \u001b[0;31m# module is simply being imported a subsequent time.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    429\u001b[0m         \u001b[0;32mreturn\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 430\u001b[0;31m       \u001b[0;32mraise\u001b[0m \u001b[0m_exceptions\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDuplicateFlagError\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfrom_flag\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    431\u001b[0m     \u001b[0mshort_name\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mflag\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshort_name\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    432\u001b[0m     \u001b[0;31m# If a new flag overrides an old one, we need to cleanup the old flag's\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mDuplicateFlagError\u001b[0m: The flag 'data_dir' is defined twice. First from /home/hadoop/.conda/envs/TF_GPU/lib/python3.7/site-packages/ipykernel_launcher.py, Second from /home/hadoop/.conda/envs/TF_GPU/lib/python3.7/site-packages/ipykernel_launcher.py.  Description from first occurrence: Directory for storing data"
     ]
    }
   ],
   "source": [
    "flags = tf.app.flags\n",
    "FLAGS = flags.FLAGS\n",
    "flags.DEFINE_string('data_dir', '/tmp/data/', 'Directory for storing data') # 把数据放在/tmp/data文件夹中"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "x = tf.placeholder(tf.float32, [None, 784]) # 占位符\n",
    "y = tf.placeholder(tf.float32, [None, 10])\n",
    "W = tf.Variable(tf.zeros([784, 10]))\n",
    "b = tf.Variable(tf.zeros([10]))\n",
    "a = tf.nn.softmax(tf.matmul(x, W) + b)\n",
    " \n",
    "# 定义损失函数和训练方法\n",
    "cross_entropy = tf.reduce_mean(-tf.reduce_sum(y * tf.log(a), reduction_indices=[1]))  # 损失函数为交叉熵\n",
    "optimizer = tf.train.GradientDescentOptimizer(0.5) # 梯度下降法，学习速率为0.5\n",
    "train = optimizer.minimize(cross_entropy) # 训练目标：最小化损失函数\n",
    " \n",
    "# Test trained model\n",
    "correct_prediction = tf.equal(tf.argmax(a, 1), tf.argmax(y, 1))\n",
    "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
     "# Train\n",
     "sess = tf.Session()      # create a plain (non-interactive) session, kept open for later cells"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "hidden": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[0., 0., 0., ..., 0., 0., 0.],\n",
       "       [0., 0., 0., ..., 0., 0., 0.],\n",
       "       [0., 0., 0., ..., 0., 0., 0.],\n",
       "       ...,\n",
       "       [0., 0., 0., ..., 0., 0., 0.],\n",
       "       [0., 0., 0., ..., 0., 0., 0.],\n",
       "       [0., 0., 0., ..., 0., 0., 0.]], dtype=float32)"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "sess.run(W)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "hidden": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING: Logging before flag parsing goes to stderr.\n",
      "W1112 15:45:36.016835 140605818394432 deprecation.py:323] From /home/hadoop/.conda/envs/TF_GPU/lib/python3.7/site-packages/tensorflow/python/util/tf_should_use.py:193: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.\n",
      "Instructions for updating:\n",
      "Use `tf.global_variables_initializer` instead.\n"
     ]
    }
   ],
   "source": [
    "with sess.as_default():\n",
    "    tf.initialize_all_variables().run()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {
    "hidden": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "acc: 0.07\n",
      "acc: 0.11\n",
      "acc: 0.06\n",
      "acc: 0.09\n",
      "acc: 0.08\n",
      "acc: 0.11\n",
      "acc: 0.06\n",
      "acc: 0.12\n",
      "acc: 0.13\n",
      "acc: 0.09\n"
     ]
    }
   ],
   "source": [
    "with sess.as_default():\n",
    "    tf.initialize_all_variables().run()\n",
    "    for i in range(10):\n",
    "    #     batch_xs, batch_ys = mnist.train.next_batch(100)\n",
    "        batch_xs = np.random.rand(100, 784)\n",
    "        batch_ys = np.zeros([100, 10])\n",
    "        label_idx = np.random.randint(0, 10, size=(100))\n",
    "        [batch_ys.itemset((row, col), 1) for row, col in enumerate(label_idx.tolist())]\n",
    "        _, acc = sess.run([train, accuracy], feed_dict={x: batch_xs, y: batch_ys})\n",
    "        print(\"acc:\", acc)\n",
    "#     print(sess.run(accuracy,feed_dict={x:,y:}))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 测试tensorflow的bert导入"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "from bert import modeling\n",
    "import os"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import BertTokenizer"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 计算图构建"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "num_labels = 14"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
     "def create_model( model, is_training, labels, num_labels):\n",
     "    \"\"\"Creates a classification head on top of a BERT encoder.\n",
     "\n",
     "    Args:\n",
     "      model: a constructed `modeling.BertModel`.\n",
     "      is_training: Python bool; when True, dropout (keep_prob 0.9) is applied\n",
     "        to the pooled output.\n",
     "      labels: int32 Tensor of shape [batch] with class ids.\n",
     "      num_labels: number of output classes.\n",
     "\n",
     "    Returns:\n",
     "      A tuple (loss, per_example_loss, logits, probabilities).\n",
     "    \"\"\"\n",
     "\n",
     "    # In the demo, we are doing a simple classification task on the entire\n",
     "    # segment.\n",
     "    #\n",
     "    # If you want to use the token-level output, use model.get_sequence_output()\n",
     "    # instead.\n",
     "    output_layer = model.get_pooled_output()\n",
     "\n",
     "    hidden_size = output_layer.shape[-1].value\n",
     "\n",
     "    output_weights = tf.get_variable(\n",
     "      \"output_weights\", [num_labels, hidden_size],\n",
     "      initializer=tf.truncated_normal_initializer(stddev=0.02))\n",
     "\n",
     "    output_bias = tf.get_variable(\n",
     "      \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n",
     "\n",
     "    # NOTE(review): only the dropout lives inside the \"loss\" variable scope\n",
     "    # here; the ops below are created outside it. That only affects op name\n",
     "    # scoping (no variables are created below), but confirm it is intentional.\n",
     "    with tf.variable_scope(\"loss\"):\n",
     "        if is_training:\n",
     "          # I.e., 0.1 dropout\n",
     "          output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n",
     "\n",
     "    logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n",
     "    logits = tf.nn.bias_add(logits, output_bias)\n",
     "    probabilities = tf.nn.softmax(logits, axis=-1)\n",
     "    log_probs = tf.nn.log_softmax(logits, axis=-1)\n",
     "\n",
     "    one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n",
     "\n",
     "    # Cross-entropy against the one-hot labels, averaged over the batch.\n",
     "    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n",
     "    loss = tf.reduce_mean(per_example_loss)\n",
     "\n",
     "    return (loss, per_example_loss, logits, probabilities)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING: Logging before flag parsing goes to stderr.\n",
      "W1123 15:50:13.381990 139643540592448 deprecation_wrapper.py:119] From /home/hadoop/THUCLS/bert/modeling.py:93: The name tf.gfile.GFile is deprecated. Please use tf.io.gfile.GFile instead.\n",
      "\n",
      "W1123 15:50:13.398351 139643540592448 deprecation_wrapper.py:119] From /home/hadoop/THUCLS/bert/modeling.py:171: The name tf.variable_scope is deprecated. Please use tf.compat.v1.variable_scope instead.\n",
      "\n",
      "W1123 15:50:13.401705 139643540592448 deprecation_wrapper.py:119] From /home/hadoop/THUCLS/bert/modeling.py:409: The name tf.get_variable is deprecated. Please use tf.compat.v1.get_variable instead.\n",
      "\n",
      "W1123 15:50:13.425915 139643540592448 deprecation_wrapper.py:119] From /home/hadoop/THUCLS/bert/modeling.py:490: The name tf.assert_less_equal is deprecated. Please use tf.compat.v1.assert_less_equal instead.\n",
      "\n",
      "W1123 15:50:13.982656 139643540592448 lazy_loader.py:50] \n",
      "The TensorFlow contrib module will not be included in TensorFlow 2.0.\n",
      "For more information, please see:\n",
      "  * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md\n",
      "  * https://github.com/tensorflow/addons\n",
      "  * https://github.com/tensorflow/io (for I/O related ops)\n",
      "If you depend on functionality not listed there, please file an issue.\n",
      "\n",
      "W1123 15:50:13.994261 139643540592448 deprecation.py:506] From /home/hadoop/THUCLS/bert/modeling.py:358: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n",
      "W1123 15:50:14.020776 139643540592448 deprecation.py:323] From /home/hadoop/THUCLS/bert/modeling.py:671: dense (from tensorflow.python.layers.core) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use keras.layers.dense instead.\n",
      "W1123 15:50:16.258844 139643540592448 deprecation.py:323] From /home/hadoop/.conda/envs/TF_GPU/lib/python3.7/site-packages/tensorflow/python/ops/math_grad.py:1205: add_dispatch_support.<locals>.wrapper (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use tf.where in 2.0, which has the same broadcast rule as np.where\n",
      "W1123 15:50:19.042438 139643540592448 deprecation.py:506] From /home/hadoop/.conda/envs/TF_GPU/lib/python3.7/site-packages/tensorflow/python/training/adagrad.py:76: calling Constant.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Call initializer instance with the dtype argument instead of passing it to the constructor\n"
     ]
    }
   ],
   "source": [
     "# Build the BERT classification graph and wire it to a pretrained checkpoint.\n",
     "bert_config = modeling.BertConfig.from_json_file(\"/home/hadoop/Cambricon/Cambricon-MLU100/publish/bert_config.json\")\n",
     "# Inputs use a fixed sequence length of 512 tokens per example.\n",
     "input_ids=tf.placeholder (shape=[None,512],dtype=tf.int32,name=  \"input_ids\")\n",
     "input_mask=tf.placeholder (shape=[None,512],dtype=tf.int32,name= \"input_mask\")\n",
     "segment_ids=tf.placeholder (shape=[None,512],dtype=tf.int32,name=\"segment_ids\")\n",
     "label_ids = tf.placeholder(shape=[None], dtype=tf.int32, name=\"labels\")\n",
     "is_training = True\n",
     "\n",
     "model = modeling.BertModel(\n",
     "  config=bert_config,\n",
     "  is_training=True,\n",
     "  input_ids=input_ids,\n",
     "  input_mask=input_mask,\n",
     "  token_type_ids=segment_ids,\n",
     "  use_one_hot_embeddings=True)\n",
     "\n",
     "# Map checkpoint variables onto the freshly built graph; they are restored\n",
     "# when the variables are initialised.\n",
     "init_checkpoint = \"/home/hadoop/Cambricon/Cambricon-MLU100/publish/bert_model.ckpt\"\n",
     "use_tpu = False\n",
     "tvars = tf.trainable_variables()\n",
     "(assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars,\n",
     "                                                                                       init_checkpoint)\n",
     "tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n",
     "tf.logging.info(\"**** Trainable Variables ****\")\n",
     "for var in tvars:\n",
     "    init_string = \"\"\n",
     "    if var.name in initialized_variable_names:\n",
     "        init_string = \", *INIT_FROM_CKPT*\"\n",
     "    tf.logging.info(\"  name = %s, shape = %s%s\", var.name, var.shape,\n",
     "                    init_string)\n",
     "\n",
     "(total_loss, per_example_loss, logits, probabilities) = create_model(\n",
     "        model, is_training, label_ids, num_labels)\n",
     "\n",
     "predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n",
     "weights = tf.ones(tf.shape(label_ids), dtype=tf.float32)\n",
     "# NOTE(review): tf.metrics.accuracy returns a (value, update_op) pair, so\n",
     "# `accuracy` is a tuple -- fetching it yields (0.0, 0.0) until the update op\n",
     "# has actually accumulated a batch.\n",
     "accuracy = tf.metrics.accuracy(labels=label_ids, predictions=predictions, weights=weights)\n",
     "\n",
     "global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n",
     "train_op = tf.train.AdagradOptimizer(0.00002).minimize(total_loss, global_step)\n",
     "\n",
     "# NOTE(review): this session closes as soon as the `with` block exits, so the\n",
     "# initialised values are discarded; later cells open their own session and\n",
     "# re-run the initialiser.\n",
     "with tf.Session() as sess:\n",
     "    sess.run(tf.global_variables_initializer())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 主函数部分"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 创建并导入相关工具"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "tokenizer = BertTokenizer.from_pretrained(\"publish/\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "heading_collapsed": true
   },
   "source": [
    "##### 数据导入"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "from TNews_Loader import *"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "import random"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "test_file = './THUCnews/cnews.test.txt'\n",
    "val_file = './THUCnews/cnews.val.txt'\n",
    "train_file = './THUCnews/cnews.train.txt'\n",
    "val_data = load_data(\"./THUCnews/cnews.val.txt\")\n",
    "tr_data = load_data(train_file)\n",
    "te_data = load_data(test_file)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
     "# Shuffle each split in place. NOTE(review): no random seed is set, so the\n",
     "# ordering (and anything sampled downstream) is not reproducible across runs.\n",
     "random.shuffle(tr_data)\n",
     "random.shuffle(val_data)\n",
     "random.shuffle(te_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "tr_reader = THUReader(tr_data, 20, 512, tokenizer)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "val_reader = THUReader(val_data, 20, 512, tokenizer)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "te_reader = THUReader(te_data, 20, 512, tokenizer)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
     "import pickle\n",
     "# Cache the preprocessed reader so later sessions can skip tokenisation.\n",
     "with open('tr_reader.pickle', 'wb') as handle:\n",
     "    pickle.dump(tr_reader, handle, protocol=pickle.HIGHEST_PROTOCOL)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
     "# Cache the test and validation readers as well.\n",
     "with open('te_reader.pickle', 'wb') as handle:\n",
     "    pickle.dump(te_reader, handle, protocol=pickle.HIGHEST_PROTOCOL)\n",
     "\n",
     "with open('val_reader.pickle', 'wb') as handle:\n",
     "    pickle.dump(val_reader, handle, protocol=pickle.HIGHEST_PROTOCOL)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##### 从cache中导入"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [],
   "source": [
     "import pickle\n",
     "# Reload the cached reader. Only safe because this notebook wrote the file\n",
     "# itself -- never pickle.load untrusted data (it can execute arbitrary code).\n",
     "with open('tr_reader.pickle', 'rb') as handle:\n",
     "    tr_reader = pickle.load(handle)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 训练模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "inial = tf.global_variables_initializer()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {},
   "outputs": [],
   "source": [
    "validation_metrics_vars = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {},
   "outputs": [],
   "source": [
    "acc_init = tf.variables_initializer(var_list=validation_metrics_vars)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [],
   "source": [
    "tr_reader.reset_batchsize(5)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "metadata": {},
   "outputs": [],
   "source": [
    "x, y, s, m = tr_reader.sample()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_steps, _ = tr_reader.label_ids.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "10000"
      ]
     },
     "execution_count": 49,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "train_steps"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "metadata": {},
   "outputs": [],
   "source": [
     "with tf.Session() as sess:\n",
     "    sess.run(inial)     # initialise global (model) variables\n",
     "    sess.run(acc_init)  # reset the tf.metrics.accuracy local counters\n",
     "    feed_dict = {input_ids:x, label_ids:y, segment_ids:s, input_mask:m}\n",
     "    # `accuracy` is the (value, update_op) pair from tf.metrics.accuracy, so\n",
     "    # `acc` comes back as a tuple; the value element is read before/with the\n",
     "    # first update, which is why the next cell shows (0.0, 0.0).\n",
     "    acc, loss, _, step = sess.run([accuracy, total_loss, train_op, global_step],feed_dict=feed_dict)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(0.0, 0.0)"
      ]
     },
     "execution_count": 52,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "acc"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[    1/10000], loss/acc = 2.52834892/0.0000000, 0.2000000 \n",
      "[    2/10000], loss/acc = 2.70694780/0.2000000, 0.1000000 \n",
      "[    3/10000], loss/acc = 2.71276951/0.1000000, 0.0666667 \n",
      "[    4/10000], loss/acc = 2.58116150/0.0666667, 0.1000000 \n",
      "[    5/10000], loss/acc = 2.57085395/0.1000000, 0.0800000 \n",
      "[    6/10000], loss/acc = 2.43122911/0.0800000, 0.1000000 \n",
      "[    7/10000], loss/acc = 2.36920547/0.1000000, 0.0857143 \n",
      "[    8/10000], loss/acc = 2.55313540/0.0857143, 0.0750000 \n",
      "[    9/10000], loss/acc = 2.58316112/0.0750000, 0.0666667 \n",
      "[   10/10000], loss/acc = 2.86783409/0.0666667, 0.0600000 \n",
      "[   11/10000], loss/acc = 2.56050038/0.0600000, 0.0545455 \n",
      "[   12/10000], loss/acc = 2.71026492/0.0545455, 0.0500000 \n",
      "[   13/10000], loss/acc = 2.48097944/0.0500000, 0.0461538 \n",
      "[   14/10000], loss/acc = 2.68827820/0.0461538, 0.0428571 \n",
      "[   15/10000], loss/acc = 2.63407326/0.0428571, 0.0400000 \n",
      "[   16/10000], loss/acc = 2.50801420/0.0400000, 0.0500000 \n",
      "[   17/10000], loss/acc = 2.66699386/0.0500000, 0.0470588 \n",
      "[   18/10000], loss/acc = 2.75261068/0.0470588, 0.0444444 \n",
      "[   19/10000], loss/acc = 2.70321608/0.0444444, 0.0526316 \n",
      "[   20/10000], loss/acc = 2.85983920/0.0526316, 0.0500000 \n",
      "[   21/10000], loss/acc = 2.82104754/0.0500000, 0.0476190 \n",
      "[   22/10000], loss/acc = 2.48982024/0.0476190, 0.0454545 \n",
      "[   23/10000], loss/acc = 2.44038463/0.0454545, 0.0434783 \n",
      "[   24/10000], loss/acc = 2.80181646/0.0434783, 0.0500000 \n",
      "[   25/10000], loss/acc = 2.88122749/0.0500000, 0.0480000 \n",
      "[   26/10000], loss/acc = 2.86922312/0.0480000, 0.0461538 \n",
      "[   27/10000], loss/acc = 2.51059198/0.0461538, 0.0444444 \n",
      "[   28/10000], loss/acc = 2.35556149/0.0444444, 0.0500000 \n",
      "[   29/10000], loss/acc = 2.40780759/0.0500000, 0.0551724 \n",
      "[   30/10000], loss/acc = 2.77946520/0.0551724, 0.0533333 \n",
      "[   31/10000], loss/acc = 2.64183450/0.0533333, 0.0580645 \n",
      "[   32/10000], loss/acc = 2.77648640/0.0580645, 0.0562500 \n",
      "[   33/10000], loss/acc = 2.66027689/0.0562500, 0.0545455 \n",
      "[   34/10000], loss/acc = 2.69968271/0.0545455, 0.0529412 \n",
      "[   35/10000], loss/acc = 2.61188388/0.0529412, 0.0514286 \n",
      "[   36/10000], loss/acc = 2.83969736/0.0514286, 0.0500000 \n",
      "[   37/10000], loss/acc = 2.58912349/0.0500000, 0.0486486 \n",
      "[   38/10000], loss/acc = 2.87419558/0.0486486, 0.0473684 \n",
      "[   39/10000], loss/acc = 2.51925135/0.0473684, 0.0461538 \n",
      "[   40/10000], loss/acc = 2.58703923/0.0461538, 0.0450000 \n",
      "[   41/10000], loss/acc = 2.87379503/0.0450000, 0.0439024 \n",
      "[   42/10000], loss/acc = 2.71689963/0.0439024, 0.0428571 \n",
      "[   43/10000], loss/acc = 2.44751549/0.0428571, 0.0418605 \n",
      "[   44/10000], loss/acc = 2.74342871/0.0418605, 0.0409091 \n",
      "[   45/10000], loss/acc = 2.63981104/0.0409091, 0.0400000 \n",
      "[   46/10000], loss/acc = 2.63006711/0.0400000, 0.0391304 \n",
      "[   47/10000], loss/acc = 2.37466002/0.0391304, 0.0468085 \n",
      "[   48/10000], loss/acc = 2.66479206/0.0468085, 0.0458333 \n",
      "[   49/10000], loss/acc = 2.54908562/0.0458333, 0.0489796 \n",
      "[   50/10000], loss/acc = 2.65887070/0.0489796, 0.0520000 \n",
      "[   51/10000], loss/acc = 2.51910377/0.0520000, 0.0509804 \n",
      "[   52/10000], loss/acc = 2.44450712/0.0509804, 0.0538462 \n",
      "[   53/10000], loss/acc = 2.68943501/0.0538462, 0.0528302 \n",
      "[   54/10000], loss/acc = 2.75958705/0.0528302, 0.0518519 \n",
      "[   55/10000], loss/acc = 2.68642855/0.0518519, 0.0545455 \n",
      "[   56/10000], loss/acc = 2.45630407/0.0545455, 0.0571429 \n",
      "[   57/10000], loss/acc = 2.76548815/0.0571429, 0.0561404 \n",
      "[   58/10000], loss/acc = 2.49560070/0.0561404, 0.0551724 \n",
      "[   59/10000], loss/acc = 2.53753400/0.0551724, 0.0542373 \n",
      "[   60/10000], loss/acc = 2.62671781/0.0542373, 0.0533333 \n",
      "[   61/10000], loss/acc = 2.66441083/0.0533333, 0.0524590 \n",
      "[   62/10000], loss/acc = 2.68350673/0.0524590, 0.0516129 \n",
      "[   63/10000], loss/acc = 2.68876362/0.0516129, 0.0539683 \n",
      "[   64/10000], loss/acc = 2.34147763/0.0539683, 0.0562500 \n",
      "[   65/10000], loss/acc = 2.63902378/0.0562500, 0.0584615 \n",
      "[   66/10000], loss/acc = 2.66738510/0.0584615, 0.0606061 \n",
      "[   67/10000], loss/acc = 2.56889963/0.0606061, 0.0597015 \n",
      "[   68/10000], loss/acc = 2.77155161/0.0597015, 0.0588235 \n",
      "[   69/10000], loss/acc = 2.83347821/0.0588235, 0.0579710 \n",
      "[   70/10000], loss/acc = 2.42674780/0.0579710, 0.0600000 \n",
      "[   71/10000], loss/acc = 2.45685792/0.0600000, 0.0619718 \n",
      "[   72/10000], loss/acc = 2.31171942/0.0619718, 0.0666667 \n",
      "[   73/10000], loss/acc = 2.42384648/0.0666667, 0.0684932 \n",
      "[   74/10000], loss/acc = 2.47473621/0.0684932, 0.0729730 \n",
      "[   75/10000], loss/acc = 2.61803961/0.0729730, 0.0720000 \n",
      "[   76/10000], loss/acc = 2.53087831/0.0720000, 0.0736842 \n",
      "[   77/10000], loss/acc = 2.58004403/0.0736842, 0.0753247 \n",
      "[   78/10000], loss/acc = 2.58281803/0.0753247, 0.0743590 \n",
      "[   79/10000], loss/acc = 2.60761833/0.0743590, 0.0734177 \n",
      "[   80/10000], loss/acc = 2.60517597/0.0734177, 0.0725000 \n",
      "[   81/10000], loss/acc = 2.61233330/0.0725000, 0.0716049 \n",
      "[   82/10000], loss/acc = 2.47039604/0.0716049, 0.0731707 \n",
      "[   83/10000], loss/acc = 2.72701335/0.0731707, 0.0722892 \n",
      "[   84/10000], loss/acc = 2.49372673/0.0722892, 0.0738095 \n",
      "[   85/10000], loss/acc = 2.50989294/0.0738095, 0.0752941 \n",
      "[   86/10000], loss/acc = 2.66100812/0.0752941, 0.0744186 \n",
      "[   87/10000], loss/acc = 2.70811987/0.0744186, 0.0735632 \n",
      "[   88/10000], loss/acc = 2.71733022/0.0735632, 0.0727273 \n",
      "[   89/10000], loss/acc = 2.71415019/0.0727273, 0.0719101 \n",
      "[   90/10000], loss/acc = 2.45164680/0.0719101, 0.0711111 \n",
      "[   91/10000], loss/acc = 2.51682997/0.0711111, 0.0725275 \n",
      "[   92/10000], loss/acc = 2.56533217/0.0725275, 0.0717391 \n",
      "[   93/10000], loss/acc = 2.33607864/0.0717391, 0.0731183 \n",
      "[   94/10000], loss/acc = 2.62528658/0.0731183, 0.0723404 \n",
      "[   95/10000], loss/acc = 2.58184004/0.0723404, 0.0715789 \n",
      "[   96/10000], loss/acc = 2.58580542/0.0715789, 0.0708333 \n",
      "[   97/10000], loss/acc = 2.55167723/0.0708333, 0.0721649 \n",
      "[   98/10000], loss/acc = 2.42946863/0.0721649, 0.0734694 \n",
      "[   99/10000], loss/acc = 2.56450343/0.0734694, 0.0727273 \n",
      "[  100/10000], loss/acc = 2.33129930/0.0727273, 0.0780000 \n",
      "[  101/10000], loss/acc = 2.55176687/0.0780000, 0.0792079 \n",
      "[  102/10000], loss/acc = 2.44029164/0.0792079, 0.0784314 \n",
      "[  103/10000], loss/acc = 2.45882535/0.0784314, 0.0776699 \n",
      "[  104/10000], loss/acc = 2.63394785/0.0776699, 0.0769231 \n",
      "[  105/10000], loss/acc = 2.64559412/0.0769231, 0.0761905 \n",
      "[  106/10000], loss/acc = 2.40964937/0.0761905, 0.0773585 \n",
      "[  107/10000], loss/acc = 2.40725279/0.0773585, 0.0785047 \n",
      "[  108/10000], loss/acc = 2.55467606/0.0785047, 0.0777778 \n",
      "[  109/10000], loss/acc = 2.58958292/0.0777778, 0.0788991 \n",
      "[  110/10000], loss/acc = 2.33073950/0.0788991, 0.0818182 \n",
      "[  111/10000], loss/acc = 2.56836510/0.0818182, 0.0810811 \n",
      "[  112/10000], loss/acc = 2.26455140/0.0810811, 0.0803571 \n",
      "[  113/10000], loss/acc = 2.51185989/0.0803571, 0.0814159 \n",
      "[  114/10000], loss/acc = 2.35960722/0.0814159, 0.0824561 \n",
      "[  115/10000], loss/acc = 2.77039003/0.0824561, 0.0817391 \n",
      "[  116/10000], loss/acc = 2.68026662/0.0817391, 0.0827586 \n",
      "[  117/10000], loss/acc = 2.94951415/0.0827586, 0.0820513 \n",
      "[  118/10000], loss/acc = 2.81119275/0.0820513, 0.0813559 \n",
      "[  119/10000], loss/acc = 2.36966991/0.0813559, 0.0806723 \n",
      "[  120/10000], loss/acc = 2.34174013/0.0806723, 0.0833333 \n",
      "[  121/10000], loss/acc = 2.50872755/0.0833333, 0.0826446 \n",
      "[  122/10000], loss/acc = 2.45702505/0.0826446, 0.0819672 \n",
      "[  123/10000], loss/acc = 2.63818288/0.0819672, 0.0813008 \n",
      "[  124/10000], loss/acc = 2.72756147/0.0813008, 0.0806452 \n",
      "[  125/10000], loss/acc = 2.73196650/0.0806452, 0.0800000 \n",
      "[  126/10000], loss/acc = 2.48962736/0.0800000, 0.0809524 \n",
      "[  127/10000], loss/acc = 2.53162885/0.0809524, 0.0803150 \n",
      "[  128/10000], loss/acc = 2.52582550/0.0803150, 0.0812500 \n",
      "[  129/10000], loss/acc = 2.41617346/0.0812500, 0.0806202 \n",
      "[  130/10000], loss/acc = 2.58584714/0.0806202, 0.0815385 \n",
      "[  131/10000], loss/acc = 2.86505651/0.0815385, 0.0809160 \n",
      "[  132/10000], loss/acc = 2.31874967/0.0809160, 0.0803030 \n",
      "[  133/10000], loss/acc = 2.47178435/0.0803030, 0.0812030 \n",
      "[  134/10000], loss/acc = 2.57057428/0.0812030, 0.0805970 \n",
      "[  135/10000], loss/acc = 2.46394110/0.0805970, 0.0814815 \n",
      "[  136/10000], loss/acc = 2.31142306/0.0814815, 0.0838235 \n",
      "[  137/10000], loss/acc = 2.18285799/0.0838235, 0.0861314 \n",
      "[  138/10000], loss/acc = 2.53093195/0.0861314, 0.0869565 \n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-53-a0313253703d>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m      4\u001b[0m     \u001b[0;32mfor\u001b[0m \u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0ms\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mm\u001b[0m \u001b[0;32min\u001b[0m  \u001b[0mtr_reader\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0miter\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      5\u001b[0m         \u001b[0mfeed_dict\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m{\u001b[0m\u001b[0minput_ids\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlabel_ids\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0my\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msegment_ids\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0ms\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput_mask\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0mm\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 6\u001b[0;31m         \u001b[0macc\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mloss\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0m_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstep\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0maccuracy\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtotal_loss\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtrain_op\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mglobal_step\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfeed_dict\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m      7\u001b[0m         print('[%5d/%5d], loss/acc = %6.8f/%6.7f, %6.7f ' % ( step, train_steps,\n\u001b[1;32m      8\u001b[0m                                                         loss, acc[0], acc[1])\n",
      "\u001b[0;32m~/.conda/envs/TF_GPU/lib/python3.7/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m    948\u001b[0m     \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    949\u001b[0m       result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[0;32m--> 950\u001b[0;31m                          run_metadata_ptr)\n\u001b[0m\u001b[1;32m    951\u001b[0m       \u001b[0;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    952\u001b[0m         \u001b[0mproto_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/.conda/envs/TF_GPU/lib/python3.7/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run\u001b[0;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m   1171\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0mfinal_fetches\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mfinal_targets\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mhandle\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mfeed_dict_tensor\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1172\u001b[0m       results = self._do_run(handle, final_targets, final_fetches,\n\u001b[0;32m-> 1173\u001b[0;31m                              feed_dict_tensor, options, run_metadata)\n\u001b[0m\u001b[1;32m   1174\u001b[0m     \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1175\u001b[0m       \u001b[0mresults\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/.conda/envs/TF_GPU/lib/python3.7/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_run\u001b[0;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m   1348\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0mhandle\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1349\u001b[0m       return self._do_call(_run_fn, feeds, fetches, targets, options,\n\u001b[0;32m-> 1350\u001b[0;31m                            run_metadata)\n\u001b[0m\u001b[1;32m   1351\u001b[0m     \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1352\u001b[0m       \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_do_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_prun_fn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeeds\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetches\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/.conda/envs/TF_GPU/lib/python3.7/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m   1354\u001b[0m   \u001b[0;32mdef\u001b[0m \u001b[0m_do_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1355\u001b[0m     \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1356\u001b[0;31m       \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m   1357\u001b[0m     \u001b[0;32mexcept\u001b[0m \u001b[0merrors\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mOpError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1358\u001b[0m       \u001b[0mmessage\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcompat\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_text\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmessage\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/.conda/envs/TF_GPU/lib/python3.7/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run_fn\u001b[0;34m(feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[1;32m   1339\u001b[0m       \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_extend_graph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1340\u001b[0m       return self._call_tf_sessionrun(\n\u001b[0;32m-> 1341\u001b[0;31m           options, feed_dict, fetch_list, target_list, run_metadata)\n\u001b[0m\u001b[1;32m   1342\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1343\u001b[0m     \u001b[0;32mdef\u001b[0m \u001b[0m_prun_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/.conda/envs/TF_GPU/lib/python3.7/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_call_tf_sessionrun\u001b[0;34m(self, options, feed_dict, fetch_list, target_list, run_metadata)\u001b[0m\n\u001b[1;32m   1427\u001b[0m     return tf_session.TF_SessionRun_wrapper(\n\u001b[1;32m   1428\u001b[0m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_session\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget_list\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1429\u001b[0;31m         run_metadata)\n\u001b[0m\u001b[1;32m   1430\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1431\u001b[0m   \u001b[0;32mdef\u001b[0m \u001b[0m_call_tf_sessionprun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "with tf.Session() as sess:\n",
    "    sess.run(inial)\n",
    "    sess.run(acc_init)\n",
    "    for epoch in range(t_epochs):\n",
    "        for x, y, s, m in  tr_reader.iter():\n",
    "            feed_dict = {input_ids:x, label_ids:y, segment_ids:s, input_mask:m}\n",
    "            acc, loss, _, step = sess.run([accuracy, total_loss, train_op, global_step],feed_dict=feed_dict)\n",
    "            print('[%5d/%5d] | %5d, loss/acc = %6.8f/%6.7f, %6.7f ' % ( step, train_steps, epoch\n",
    "                                                            loss, acc[0], acc[1])\n",
    "                             ) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
