{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "from bert import modeling\n",
    "import os\n",
    "from transformers import BertTokenizer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "from TNews_Loader import *"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "class adict(dict):\n",
    "    ''' Attribute dictionary - a convenience data structure, similar to SimpleNamespace in python 3.3\n",
    "        One can use attributes to read/write dictionary content.\n",
    "    '''\n",
    "    def __init__(self, *av, **kav):\n",
    "        dict.__init__(self, *av, **kav)\n",
    "        self.__dict__ = self"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def create_model(model, is_training, labels, num_labels=14):\n",
    "    \"\"\"Creates a classification head on top of a built BERT model.\n",
    "\n",
    "    Args:\n",
    "        model: a modeling.BertModel instance (graph already constructed).\n",
    "        is_training: Python bool; when True, dropout (keep_prob 0.9) is\n",
    "            applied to the pooled output before the softmax layer.\n",
    "        labels: int32 Tensor [batch] with class ids in [0, num_labels).\n",
    "        num_labels: number of target classes (14 for THUCNews).\n",
    "\n",
    "    Returns:\n",
    "        (loss, per_example_loss, logits, probabilities) tensors.\n",
    "    \"\"\"\n",
    "    # In the demo, we are doing a simple classification task on the entire\n",
    "    # segment.\n",
    "    #\n",
    "    # If you want to use the token-level output, use model.get_sequence_output()\n",
    "    # instead.\n",
    "    output_layer = model.get_pooled_output()\n",
    "    # NOTE: .value works on TF1 Dimension objects; in TF2 shape[-1] is a\n",
    "    # plain int, so this line is TF1-only.\n",
    "    hidden_size = output_layer.shape[-1].value\n",
    "    output_weights = tf.get_variable(\n",
    "      \"output_weights\", [num_labels, hidden_size],\n",
    "      initializer=tf.truncated_normal_initializer(stddev=0.02))\n",
    "    output_bias = tf.get_variable(\n",
    "      \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n",
    "    with tf.variable_scope(\"loss\"):\n",
    "        if is_training:\n",
    "          # I.e., 0.1 dropout\n",
    "          output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n",
    "    logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n",
    "    logits = tf.nn.bias_add(logits, output_bias)\n",
    "    probabilities = tf.nn.softmax(logits, axis=-1, name=\"probabilities\")\n",
    "    log_probs = tf.nn.log_softmax(logits, axis=-1, name=\"log_probs\")\n",
    "    # Cross-entropy against one-hot targets, averaged over the batch.\n",
    "    one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n",
    "    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1, name=\"per_example_loss\")\n",
    "    loss = tf.reduce_mean(per_example_loss, name=\"loss\")\n",
    "    return (loss, per_example_loss, logits, probabilities)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def TopicTrainGraph(max_sent_len=256, num_labels=14, learning_rate=2e-5):\n",
    "    \"\"\"Builds the BERT fine-tuning graph for THUCNews topic classification.\n",
    "\n",
    "    Args:\n",
    "        max_sent_len: maximum token sequence length of the inputs.\n",
    "        num_labels: number of topic classes (default 14, as before).\n",
    "        learning_rate: Adam learning rate (default 2e-5, identical to the\n",
    "            previously hard-coded 0.00002).\n",
    "\n",
    "    Returns:\n",
    "        adict exposing the input placeholders, prediction/metric tensors,\n",
    "        total loss, global step and the train op.\n",
    "    \"\"\"\n",
    "    bert_config = modeling.BertConfig.from_json_file(\"./publish/bert_config.json\")\n",
    "    # Inputs are fed through feed_dict at train/eval time.\n",
    "    input_ids = tf.placeholder(shape=[None, max_sent_len], dtype=tf.int32, name=\"input_ids\")\n",
    "    input_mask = tf.placeholder(shape=[None, max_sent_len], dtype=tf.int32, name=\"input_mask\")\n",
    "    segment_ids = tf.placeholder(shape=[None, max_sent_len], dtype=tf.int32, name=\"segment_ids\")\n",
    "    label_ids = tf.placeholder(shape=[None], dtype=tf.int32, name=\"labels\")\n",
    "    is_training = True  # this builder always constructs the training graph\n",
    "    model = modeling.BertModel(\n",
    "        config=bert_config,\n",
    "        is_training=is_training,\n",
    "        input_ids=input_ids,\n",
    "        input_mask=input_mask,\n",
    "        token_type_ids=segment_ids,\n",
    "        use_one_hot_embeddings=True)\n",
    "\n",
    "    # Warm-start every matching variable from the pretrained checkpoint.\n",
    "    init_checkpoint = \"./publish/bert_model.ckpt\"\n",
    "    tvars = tf.trainable_variables()\n",
    "    (assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars,\n",
    "                                                                                               init_checkpoint)\n",
    "    tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n",
    "    tf.logging.info(\"**** Trainable Variables ****\")\n",
    "    for var in tvars:\n",
    "        init_string = \"\"\n",
    "        if var.name in initialized_variable_names:\n",
    "            init_string = \", *INIT_FROM_CKPT*\"\n",
    "        tf.logging.info(\"  name = %s, shape = %s%s\", var.name, var.shape,\n",
    "                        init_string)\n",
    "\n",
    "    (total_loss, per_example_loss, logits, probabilities) = create_model(\n",
    "        model, is_training, label_ids, num_labels=num_labels)\n",
    "\n",
    "    predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n",
    "    weights = tf.ones(tf.shape(label_ids), dtype=tf.float32)\n",
    "    # NOTE: tf.metrics.accuracy returns an (accuracy, update_op) pair whose\n",
    "    # counters live in LOCAL_VARIABLES and must be initialized before use.\n",
    "    accuracy = tf.metrics.accuracy(labels=label_ids, predictions=predictions, weights=weights, name=\"accuracy\")\n",
    "\n",
    "    global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n",
    "    train_op = tf.train.AdamOptimizer(learning_rate).minimize(total_loss, global_step)\n",
    "    return adict(\n",
    "        input_ids=input_ids,\n",
    "        input_mask=input_mask,\n",
    "        token_type_ids=segment_ids,\n",
    "        label_ids=label_ids,\n",
    "        predictions=predictions,\n",
    "        probabilities=probabilities,\n",
    "        logits=logits,\n",
    "        total_loss=total_loss,\n",
    "        global_step=global_step,\n",
    "        train_op=train_op,\n",
    "        accuracy=accuracy\n",
    "    )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hoist the repeated magic numbers into named constants so the readers\n",
    "# and the graph cannot silently drift apart.\n",
    "BATCH_SIZE = 20\n",
    "MAX_SEQ_LENGTH = 256  # must match max_sent_len used when building the graph\n",
    "\n",
    "test_file = './THUCnews/cnews.test.txt'\n",
    "val_file = './THUCnews/cnews.val.txt'\n",
    "train_file = './THUCnews/cnews.train.txt'\n",
    "\n",
    "tokenizer = BertTokenizer.from_pretrained('./publish/')\n",
    "train_reader = THUReader(train_file, batchsize=BATCH_SIZE, max_seq_length=MAX_SEQ_LENGTH, tokenizer=tokenizer)\n",
    "val_reader = THUReader(val_file, batchsize=BATCH_SIZE, max_seq_length=MAX_SEQ_LENGTH, tokenizer=tokenizer)\n",
    "test_reader = THUReader(test_file, batchsize=BATCH_SIZE, max_seq_length=MAX_SEQ_LENGTH, tokenizer=tokenizer)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_graph(model_file):\n",
    "    \"\"\"Deserialize a frozen GraphDef from model_file and import it.\n",
    "\n",
    "    Returns a (graph, graph_def) pair: a new tf.Graph containing the\n",
    "    imported nodes, plus the parsed GraphDef itself.\n",
    "    \"\"\"\n",
    "    graph_def = tf.GraphDef()\n",
    "    with open(model_file, \"rb\") as handle:\n",
    "        graph_def.ParseFromString(handle.read())\n",
    "    graph = tf.Graph()\n",
    "    with graph.as_default():\n",
    "        tf.import_graph_def(graph_def)\n",
    "    return graph, graph_def"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/hadoop/.conda/envs/TF_GPU/lib/python3.7/site-packages/ipykernel_launcher.py:5: RuntimeWarning: Unexpected end-group tag: Not all data was converted\n",
      "  \"\"\"\n"
     ]
    }
   ],
   "source": [
    "# NOTE(review): this passes a checkpoint *index* file, but load_graph\n",
    "# expects a serialized GraphDef; the RuntimeWarning above suggests the\n",
    "# parse consumed garbage -- confirm the intended file (e.g. a frozen .pb).\n",
    "graph, g_def = load_graph(\"./thu_model.ckpt-0.index\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING: Logging before flag parsing goes to stderr.\n",
      "W0831 21:39:45.437606 140103319508800 deprecation_wrapper.py:119] From /home/hadoop/THUCLS/bert/modeling.py:93: The name tf.gfile.GFile is deprecated. Please use tf.io.gfile.GFile instead.\n",
      "\n",
      "W0831 21:39:45.451912 140103319508800 deprecation_wrapper.py:119] From /home/hadoop/THUCLS/bert/modeling.py:171: The name tf.variable_scope is deprecated. Please use tf.compat.v1.variable_scope instead.\n",
      "\n",
      "W0831 21:39:45.453356 140103319508800 deprecation_wrapper.py:119] From /home/hadoop/THUCLS/bert/modeling.py:409: The name tf.get_variable is deprecated. Please use tf.compat.v1.get_variable instead.\n",
      "\n",
      "W0831 21:39:45.474048 140103319508800 deprecation_wrapper.py:119] From /home/hadoop/THUCLS/bert/modeling.py:490: The name tf.assert_less_equal is deprecated. Please use tf.compat.v1.assert_less_equal instead.\n",
      "\n",
      "/home/hadoop/.conda/envs/TF_GPU/lib/python3.7/site-packages/dask/dataframe/utils.py:13: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.\n",
      "  import pandas.util.testing as tm\n",
      "W0831 21:39:46.266984 140103319508800 lazy_loader.py:50] \n",
      "The TensorFlow contrib module will not be included in TensorFlow 2.0.\n",
      "For more information, please see:\n",
      "  * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md\n",
      "  * https://github.com/tensorflow/addons\n",
      "  * https://github.com/tensorflow/io (for I/O related ops)\n",
      "If you depend on functionality not listed there, please file an issue.\n",
      "\n",
      "W0831 21:39:46.276839 140103319508800 deprecation.py:506] From /home/hadoop/THUCLS/bert/modeling.py:358: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n",
      "W0831 21:39:46.301602 140103319508800 deprecation.py:323] From /home/hadoop/THUCLS/bert/modeling.py:671: dense (from tensorflow.python.layers.core) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use keras.layers.dense instead.\n",
      "W0831 21:39:48.489064 140103319508800 deprecation.py:323] From /home/hadoop/.conda/envs/TF_GPU/lib/python3.7/site-packages/tensorflow/python/ops/math_grad.py:1205: add_dispatch_support.<locals>.wrapper (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use tf.where in 2.0, which has the same broadcast rule as np.where\n",
      "W0831 21:39:56.761395 140103319508800 deprecation.py:323] From /home/hadoop/.conda/envs/TF_GPU/lib/python3.7/site-packages/tensorflow/python/training/saver.py:1276: checkpoint_exists (from tensorflow.python.training.checkpoint_management) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use standard file APIs to check for files with this prefix.\n"
     ]
    }
   ],
   "source": [
    "graph = TopicTrainGraph(max_sent_len=256)\n",
    "# tf.metrics.* counters live in LOCAL_VARIABLES; collect them so the\n",
    "# running accuracy can be reset before each evaluation pass.\n",
    "validation_metrics_vars = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES)\n",
    "acc_init = tf.variables_initializer(var_list=validation_metrics_vars)\n",
    "saver = tf.train.Saver()\n",
    "sess = tf.Session()\n",
    "with sess.as_default():\n",
    "    sess.run(tf.global_variables_initializer())\n",
    "    sess.run(acc_init)\n",
    "    # Reuse the Saver built above; it was previously re-created here,\n",
    "    # shadowing the identical outer instance for no reason.\n",
    "    saver.restore(sess, \"./thu_saved/thu_model.ckpt-1\")\n",
    "#     saver.save(sess, \"./thu_model.ckpt\", global_step=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "collapsed": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(0.0, 0.95)\n",
      "(0.95, 0.975)\n",
      "(0.975, 0.98333335)\n",
      "(0.98333335, 0.95)\n",
      "(0.95, 0.95)\n",
      "(0.95, 0.95)\n",
      "(0.95, 0.95)\n",
      "(0.95, 0.94375)\n",
      "(0.94375, 0.9388889)\n",
      "(0.9388889, 0.945)\n",
      "(0.945, 0.95)\n",
      "(0.95, 0.94166666)\n",
      "(0.94166666, 0.9461538)\n",
      "(0.9461538, 0.95)\n",
      "(0.95, 0.95)\n",
      "(0.95, 0.946875)\n",
      "(0.946875, 0.9470588)\n",
      "(0.9470588, 0.9444444)\n",
      "(0.9444444, 0.94210523)\n",
      "(0.94210523, 0.945)\n",
      "(0.945, 0.9452381)\n",
      "(0.9452381, 0.94545454)\n",
      "(0.94545454, 0.9456522)\n",
      "(0.9456522, 0.9458333)\n",
      "(0.9458333, 0.946)\n",
      "(0.946, 0.9461538)\n",
      "(0.9461538, 0.9462963)\n",
      "(0.9462963, 0.94464284)\n",
      "(0.94464284, 0.94655174)\n",
      "(0.94655174, 0.945)\n",
      "(0.945, 0.9467742)\n",
      "(0.9467742, 0.9453125)\n",
      "(0.9453125, 0.94242424)\n",
      "(0.94242424, 0.9411765)\n",
      "(0.9411765, 0.94285715)\n",
      "(0.94285715, 0.94305557)\n",
      "(0.94305557, 0.9418919)\n",
      "(0.9418919, 0.9394737)\n",
      "(0.9394737, 0.9397436)\n",
      "(0.9397436, 0.94125)\n",
      "(0.94125, 0.9414634)\n",
      "(0.9414634, 0.94285715)\n",
      "(0.94285715, 0.94302326)\n",
      "(0.94302326, 0.94204545)\n",
      "(0.94204545, 0.94222224)\n",
      "(0.94222224, 0.9434783)\n",
      "(0.9434783, 0.94361705)\n",
      "(0.94361705, 0.9447917)\n",
      "(0.9447917, 0.94285715)\n",
      "(0.94285715, 0.942)\n",
      "(0.942, 0.9431372)\n",
      "(0.9431372, 0.94326925)\n",
      "(0.94326925, 0.9433962)\n",
      "(0.9433962, 0.9444444)\n",
      "(0.9444444, 0.94363636)\n",
      "(0.94363636, 0.94375)\n",
      "(0.94375, 0.94210523)\n",
      "(0.94210523, 0.94310343)\n",
      "(0.94310343, 0.9440678)\n",
      "(0.9440678, 0.94416666)\n",
      "(0.94416666, 0.94426227)\n",
      "(0.94426227, 0.9451613)\n",
      "(0.9451613, 0.9444444)\n",
      "(0.9444444, 0.9453125)\n",
      "(0.9453125, 0.94461536)\n",
      "(0.94461536, 0.9439394)\n",
      "(0.9439394, 0.9447761)\n",
      "(0.9447761, 0.94558823)\n",
      "(0.94558823, 0.9442029)\n",
      "(0.9442029, 0.9442857)\n",
      "(0.9442857, 0.9443662)\n",
      "(0.9443662, 0.9444444)\n",
      "(0.9444444, 0.94520545)\n",
      "(0.94520545, 0.9459459)\n",
      "(0.9459459, 0.946)\n",
      "(0.946, 0.94539475)\n",
      "(0.94539475, 0.94545454)\n",
      "(0.94545454, 0.94551283)\n",
      "(0.94551283, 0.94556963)\n",
      "(0.94556963, 0.94625)\n",
      "(0.94625, 0.9469136)\n",
      "(0.9469136, 0.94634145)\n",
      "(0.94634145, 0.9469879)\n",
      "(0.9469879, 0.9470238)\n",
      "(0.9470238, 0.9470588)\n",
      "(0.9470588, 0.94593024)\n",
      "(0.94593024, 0.94655174)\n",
      "(0.94655174, 0.9465909)\n",
      "(0.9465909, 0.947191)\n",
      "(0.947191, 0.94777775)\n",
      "(0.94777775, 0.94725275)\n",
      "(0.94725275, 0.94673914)\n",
      "(0.94673914, 0.9467742)\n",
      "(0.9467742, 0.9473404)\n",
      "(0.9473404, 0.94789475)\n",
      "(0.94789475, 0.9484375)\n",
      "(0.9484375, 0.94793814)\n",
      "(0.94793814, 0.94744897)\n",
      "(0.94744897, 0.9479798)\n",
      "(0.9479798, 0.947)\n",
      "(0.947, 0.9470297)\n",
      "(0.9470297, 0.9465686)\n",
      "(0.9465686, 0.9470874)\n",
      "(0.9470874, 0.94711536)\n",
      "(0.94711536, 0.94714284)\n",
      "(0.94714284, 0.94716984)\n",
      "(0.94716984, 0.94766355)\n",
      "(0.94766355, 0.9476852)\n",
      "(0.9476852, 0.9481651)\n",
      "(0.9481651, 0.94863635)\n",
      "(0.94863635, 0.9481982)\n",
      "(0.9481982, 0.94866073)\n",
      "(0.94866073, 0.94911504)\n",
      "(0.94911504, 0.9486842)\n",
      "(0.9486842, 0.9478261)\n",
      "(0.9478261, 0.9478448)\n",
      "(0.9478448, 0.9482906)\n",
      "(0.9482906, 0.9483051)\n",
      "(0.9483051, 0.94873947)\n",
      "(0.94873947, 0.94875)\n",
      "(0.94875, 0.94917357)\n",
      "(0.94917357, 0.9491803)\n",
      "(0.9491803, 0.949187)\n",
      "(0.949187, 0.9487903)\n",
      "(0.9487903, 0.9488)\n",
      "(0.9488, 0.9488095)\n",
      "(0.9488095, 0.9488189)\n",
      "(0.9488189, 0.9488281)\n",
      "(0.9488281, 0.9488372)\n",
      "(0.9488372, 0.94884616)\n",
      "(0.94884616, 0.94923663)\n",
      "(0.94923663, 0.94848484)\n",
      "(0.94848484, 0.94774437)\n",
      "(0.94774437, 0.9477612)\n",
      "(0.9477612, 0.94814813)\n",
      "(0.94814813, 0.9481618)\n",
      "(0.9481618, 0.9481752)\n",
      "(0.9481752, 0.9485507)\n",
      "(0.9485507, 0.94892085)\n",
      "(0.94892085, 0.9489286)\n",
      "(0.9489286, 0.9485816)\n",
      "(0.9485816, 0.9489437)\n",
      "(0.9489437, 0.94895107)\n",
      "(0.94895107, 0.94930553)\n",
      "(0.94930553, 0.9486207)\n",
      "(0.9486207, 0.94863015)\n",
      "(0.94863015, 0.9489796)\n",
      "(0.9489796, 0.9489865)\n",
      "(0.9489865, 0.94899327)\n",
      "(0.94899327, 0.9493333)\n",
      "(0.9493333, 0.9493377)\n",
      "(0.9493377, 0.9490132)\n",
      "(0.9490132, 0.9493464)\n",
      "(0.9493464, 0.949026)\n",
      "(0.949026, 0.9493548)\n",
      "(0.9493548, 0.9496795)\n",
      "(0.9496795, 0.9490446)\n",
      "(0.9490446, 0.9481013)\n",
      "(0.9481013, 0.94748425)\n",
      "(0.94748425, 0.946875)\n",
      "(0.946875, 0.94627327)\n",
      "(0.94627327, 0.94660497)\n",
      "(0.94660497, 0.9469325)\n",
      "(0.9469325, 0.9472561)\n",
      "(0.9472561, 0.9463636)\n",
      "(0.9463636, 0.94638556)\n",
      "(0.94638556, 0.9461078)\n",
      "(0.9461078, 0.9464286)\n",
      "(0.9464286, 0.9467456)\n",
      "(0.9467456, 0.9467647)\n",
      "(0.9467647, 0.947076)\n",
      "(0.947076, 0.9468023)\n",
      "(0.9468023, 0.9468208)\n",
      "(0.9468208, 0.9468391)\n",
      "(0.9468391, 0.94714284)\n",
      "(0.94714284, 0.9471591)\n",
      "(0.9471591, 0.9468927)\n",
      "(0.9468927, 0.947191)\n",
      "(0.947191, 0.94748604)\n",
      "(0.94748604, 0.94777775)\n",
      "(0.94777775, 0.94779)\n",
      "(0.94779, 0.9478022)\n",
      "(0.9478022, 0.947541)\n",
      "(0.947541, 0.9478261)\n",
      "(0.9478261, 0.9475676)\n",
      "(0.9475676, 0.9473118)\n",
      "(0.9473118, 0.9470588)\n",
      "(0.9470588, 0.9468085)\n",
      "(0.9468085, 0.94656086)\n",
      "(0.94656086, 0.9465789)\n",
      "(0.9465789, 0.94685864)\n",
      "(0.94685864, 0.9471354)\n",
      "(0.9471354, 0.94637305)\n",
      "(0.94637305, 0.94639176)\n",
      "(0.94639176, 0.94666666)\n",
      "(0.94666666, 0.94668365)\n",
      "(0.94668365, 0.9467005)\n",
      "(0.9467005, 0.94646466)\n",
      "(0.94646466, 0.94673365)\n",
      "(0.94673365, 0.9465)\n",
      "(0.9465, 0.9465174)\n",
      "(0.9465174, 0.94653463)\n",
      "(0.94653463, 0.946798)\n",
      "(0.946798, 0.9470588)\n",
      "(0.9470588, 0.94707316)\n",
      "(0.94707316, 0.9470874)\n",
      "(0.9470874, 0.9471015)\n",
      "(0.9471015, 0.946875)\n",
      "(0.946875, 0.9471292)\n",
      "(0.9471292, 0.94714284)\n",
      "(0.94714284, 0.94715637)\n",
      "(0.94715637, 0.94740564)\n",
      "(0.94740564, 0.94694835)\n",
      "(0.94694835, 0.9469626)\n",
      "(0.9469626, 0.9469767)\n",
      "(0.9469767, 0.9469907)\n",
      "(0.9469907, 0.94723505)\n",
      "(0.94723505, 0.94747704)\n",
      "(0.94747704, 0.9474886)\n",
      "(0.9474886, 0.94772726)\n",
      "(0.94772726, 0.9477376)\n",
      "(0.9477376, 0.94774777)\n",
      "(0.94774777, 0.9479821)\n",
      "(0.9479821, 0.94776785)\n",
      "(0.94776785, 0.948)\n",
      "(0.948, 0.9482301)\n",
      "(0.9482301, 0.9482379)\n",
      "(0.9482379, 0.94846493)\n",
      "(0.94846493, 0.94868994)\n",
      "(0.94868994, 0.94869566)\n",
      "(0.94869566, 0.94848484)\n",
      "(0.94848484, 0.9487069)\n",
      "(0.9487069, 0.94892704)\n",
      "(0.94892704, 0.9491453)\n",
      "(0.9491453, 0.94914895)\n",
      "(0.94914895, 0.9491525)\n",
      "(0.9491525, 0.9491561)\n",
      "(0.9491561, 0.94873947)\n",
      "(0.94873947, 0.94853556)\n",
      "(0.94853556, 0.94875)\n",
      "(0.94875, 0.9485477)\n",
      "(0.9485477, 0.9483471)\n",
      "(0.9483471, 0.9483539)\n",
      "(0.9483539, 0.94815576)\n",
      "(0.94815576, 0.9479592)\n",
      "(0.9479592, 0.9481707)\n",
      "(0.9481707, 0.9483806)\n",
      "(0.9483806, 0.9481855)\n",
      "(0.9481855, 0.9483936)\n",
      "(0.9483936, 0.9484)\n"
     ]
    },
    {
     "ename": "NameError",
     "evalue": "name 'loss' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-10-6a32ae642056>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m     16\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     17\u001b[0m     \u001b[0mval_acc\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmean\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0maccs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 18\u001b[0;31m     \u001b[0mval_loss\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmean\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mloss\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[0;31mNameError\u001b[0m: name 'loss' is not defined"
     ]
    }
   ],
   "source": [
    "with sess.as_default():\n",
    "    accs = []\n",
    "    losses = []\n",
    "    # tf.metrics.accuracy returns an (accuracy, update_op) pair; running\n",
    "    # the pair below yields (value before update, value after update),\n",
    "    # which is exactly what gets printed per batch.\n",
    "    accuracy = tf.metrics.accuracy(labels=graph.label_ids, predictions=graph.predictions, \n",
    "                                    name=\"valid_accuracy\")\n",
    "    # Reset the metric's internal counters (they live in LOCAL_VARIABLES)\n",
    "    # so this evaluation starts from zero.\n",
    "    validation_metrics_vars = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES)\n",
    "    acc_init = tf.variables_initializer(var_list=validation_metrics_vars)\n",
    "    sess.run(acc_init)\n",
    "    for x, m, y in val_reader.iter():\n",
    "        s = np.zeros_like(x)  # single-segment inputs: all token_type ids are 0\n",
    "        feed_dict = {graph.input_ids: x, graph.label_ids: y, graph.token_type_ids: s, graph.input_mask: m}\n",
    "        acc, = sess.run([accuracy], feed_dict=feed_dict)\n",
    "        accs.append(acc)\n",
    "        print(acc)\n",
    "#         losses.append(loss)\n",
    "\n",
    "    # NOTE(review): each element of accs is a 2-tuple, so np.mean averages\n",
    "    # both members; the final running accuracy is accs[-1][1] -- confirm\n",
    "    # which quantity is intended here.\n",
    "    val_acc = np.mean(accs)\n",
    "#     val_loss = np.mean(loss)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(0.93920076, 0.30453116)"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "val_acc, val_loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "with sess.as_default():\n",
    "    accs = []\n",
    "    losses = []\n",
    "    # Fresh streaming-accuracy metric for the test set; its counters live\n",
    "    # in LOCAL_VARIABLES and are reset below before the pass starts.\n",
    "    accuracy = tf.metrics.accuracy(labels=graph.label_ids, predictions=graph.predictions, \n",
    "                                    name=\"valid_accuracy\")\n",
    "    validation_metrics_vars = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES)\n",
    "    acc_init = tf.variables_initializer(var_list=validation_metrics_vars)\n",
    "    sess.run(acc_init)\n",
    "    for x, m, y in test_reader.iter():\n",
    "        s = np.zeros_like(x)  # single-segment inputs: all token_type ids are 0\n",
    "        feed_dict = {graph.input_ids: x, graph.label_ids: y, graph.token_type_ids: s, graph.input_mask: m}\n",
    "        # acc is the (value, update_op) result pair of tf.metrics.accuracy;\n",
    "        # its second element is the running accuracy after this batch.\n",
    "        acc, = sess.run([accuracy], feed_dict=feed_dict)\n",
    "        accs.append(acc)\n",
    "#         print(acc)\n",
    "#         losses.append(loss)\n",
    "\n",
    "#     val_acc = np.mean(accs)\n",
    "#     val_loss = np.mean(loss)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[(0.96646464, 0.96643144),\n",
       " (0.96643144, 0.9663984),\n",
       " (0.9663984, 0.9664659),\n",
       " (0.9664659, 0.96643287),\n",
       " (0.96643287, 0.9665)]"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "accs[-5:]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "with sess.as_default():\n",
    "    accs = []\n",
    "    losses = []\n",
    "    for x, m, y in test_reader.iter():\n",
    "        s = np.zeros_like(x)  # single-segment inputs: all token_type ids are 0\n",
    "        feed_dict = {graph.input_ids: x, graph.label_ids: y, graph.token_type_ids: s, graph.input_mask: m}\n",
    "        acc, loss = sess.run([graph.accuracy, graph.total_loss], feed_dict=feed_dict)\n",
    "        accs.append(acc)\n",
    "        losses.append(loss)\n",
    "\n",
    "    te_acc = np.mean(accs)\n",
    "    # Bug fix: average over all collected per-batch losses; np.mean(loss)\n",
    "    # only looked at the final batch's loss.\n",
    "    te_loss = np.mean(losses)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(0.95601636, 0.35598582)"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "te_acc, te_loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Re-create a session and rebuild the graph structure from the saved\n",
    "# MetaGraph. This imports the graph only; variable values would still\n",
    "# need a saver.restore call against the checkpoint data.\n",
    "sess = tf.Session()\n",
    "with sess.as_default():\n",
    "    saver = tf.train.import_meta_graph('./thu_saved/thu_model.ckpt-1.meta')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "with sess.as_default():\n",
    "    g = tf.get_default_graph()\n",
    "    # Look up the placeholders/tensors of the imported MetaGraph by the\n",
    "    # names assigned when the training graph was built.\n",
    "    input_ids = g.get_tensor_by_name(\"input_ids:0\")\n",
    "    input_mask = g.get_tensor_by_name(\"input_mask:0\")\n",
    "    segment_ids = g.get_tensor_by_name(\"segment_ids:0\")\n",
    "    label_ids = g.get_tensor_by_name(\"labels:0\")\n",
    "    probs = g.get_tensor_by_name(\"probabilities:0\")\n",
    "    per_example_loss = g.get_tensor_by_name(\"per_example_loss:0\")\n",
    "    # Streaming accuracy over argmax predictions; returns an\n",
    "    # (accuracy, update_op) pair backed by LOCAL_VARIABLES.\n",
    "    accuracy = tf.metrics.accuracy(labels=label_ids, \n",
    "                                   predictions=tf.argmax(probs, axis=-1, output_type=tf.int32),\n",
    "                                   name=\"infer_accuracy\")\n",
    "    validation_metrics_vars = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES)\n",
    "    acc_init = tf.variables_initializer(var_list=validation_metrics_vars)\n",
    "    sess.run(acc_init)\n",
    "#     s = np.zeros_like(x)\n",
    "#     feed_dict = {input_ids: x, label_ids: y, segment_ids: s, input_mask: m}\n",
    "#     preds = sess.run([probs], feed_dict=feed_dict)\n",
    "#     total_loss = g.get_tensor_by_name(\"loss:0\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
