{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "Copy of Copy of BERT_for_intent_classification.ipynb",
      "version": "0.3.2",
      "provenance": [],
      "collapsed_sections": [],
      "toc_visible": true,
      "include_colab_link": true
    },
    "language_info": {
      "codemirror_mode": {
        "name": "ipython",
        "version": 3
      },
      "file_extension": ".py",
      "mimetype": "text/x-python",
      "name": "python",
      "nbconvert_exporter": "python",
      "pygments_lexer": "ipython3",
      "version": "3.6.8"
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "view-in-github",
        "colab_type": "text"
      },
      "source": [
        "<a href=\"https://colab.research.google.com/github/zhenwenzhang/BERT-SLU/blob/master/BERT_for_intent_classification.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "tXbuDbK85HIy",
        "colab_type": "code",
        "outputId": "b403ce93-7c28-4f80-ccd9-fcc497a50170",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 118
        }
      },
      "source": [
        "! git clone https://github.com/zhenwenzhang/BERT-SLU.git\n",
        "# % cd BERT-SLU\n",
        "# ! rm -rf log\n",
        "# ! git"
      ],
      "execution_count": 2,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Cloning into 'BERT-SLU'...\n",
            "remote: Enumerating objects: 64, done.\u001b[K\n",
            "remote: Counting objects:   1% (1/64)   \u001b[K\rremote: Counting objects:   3% (2/64)   \u001b[K\rremote: Counting objects:   4% (3/64)   \u001b[K\rremote: Counting objects:   6% (4/64)   \u001b[K\rremote: Counting objects:   7% (5/64)   \u001b[K\rremote: Counting objects:   9% (6/64)   \u001b[K\rremote: Counting objects:  10% (7/64)   \u001b[K\rremote: Counting objects:  12% (8/64)   \u001b[K\rremote: Counting objects:  14% (9/64)   \u001b[K\rremote: Counting objects:  15% (10/64)   \u001b[K\rremote: Counting objects:  17% (11/64)   \u001b[K\rremote: Counting objects:  18% (12/64)   \u001b[K\rremote: Counting objects:  20% (13/64)   \u001b[K\rremote: Counting objects:  21% (14/64)   \u001b[K\rremote: Counting objects:  23% (15/64)   \u001b[K\rremote: Counting objects:  25% (16/64)   \u001b[K\rremote: Counting objects:  26% (17/64)   \u001b[K\rremote: Counting objects:  28% (18/64)   \u001b[K\rremote: Counting objects:  29% (19/64)   \u001b[K\rremote: Counting objects:  31% (20/64)   \u001b[K\rremote: Counting objects:  32% (21/64)   \u001b[K\rremote: Counting objects:  34% (22/64)   \u001b[K\rremote: Counting objects:  35% (23/64)   \u001b[K\rremote: Counting objects:  37% (24/64)   \u001b[K\rremote: Counting objects:  39% (25/64)   \u001b[K\rremote: Counting objects:  40% (26/64)   \u001b[K\rremote: Counting objects:  42% (27/64)   \u001b[K\rremote: Counting objects:  43% (28/64)   \u001b[K\rremote: Counting objects:  45% (29/64)   \u001b[K\rremote: Counting objects:  46% (30/64)   \u001b[K\rremote: Counting objects:  48% (31/64)   \u001b[K\rremote: Counting objects:  50% (32/64)   \u001b[K\rremote: Counting objects:  51% (33/64)   \u001b[K\rremote: Counting objects:  53% (34/64)   \u001b[K\rremote: Counting objects:  54% (35/64)   \u001b[K\rremote: Counting objects:  56% (36/64)   \u001b[K\rremote: Counting objects:  57% (37/64)   \u001b[K\rremote: Counting objects:  59% (38/64)   \u001b[K\rremote: Counting objects:  60% (39/64)   
\u001b[K\rremote: Counting objects:  62% (40/64)   \u001b[K\rremote: Counting objects:  64% (41/64)   \u001b[K\rremote: Counting objects:  65% (42/64)   \u001b[K\rremote: Counting objects:  67% (43/64)   \u001b[K\rremote: Counting objects:  68% (44/64)   \u001b[K\rremote: Counting objects:  70% (45/64)   \u001b[K\rremote: Counting objects:  71% (46/64)   \u001b[K\rremote: Counting objects:  73% (47/64)   \u001b[K\rremote: Counting objects:  75% (48/64)   \u001b[K\rremote: Counting objects:  76% (49/64)   \u001b[K\rremote: Counting objects:  78% (50/64)   \u001b[K\rremote: Counting objects:  79% (51/64)   \u001b[K\rremote: Counting objects:  81% (52/64)   \u001b[K\rremote: Counting objects:  82% (53/64)   \u001b[K\rremote: Counting objects:  84% (54/64)   \u001b[K\rremote: Counting objects:  85% (55/64)   \u001b[K\rremote: Counting objects:  87% (56/64)   \u001b[K\rremote: Counting objects:  89% (57/64)   \u001b[K\rremote: Counting objects:  90% (58/64)   \u001b[K\rremote: Counting objects:  92% (59/64)   \u001b[K\rremote: Counting objects:  93% (60/64)   \u001b[K\rremote: Counting objects:  95% (61/64)   \u001b[K\rremote: Counting objects:  96% (62/64)   \u001b[K\rremote: Counting objects:  98% (63/64)   \u001b[K\rremote: Counting objects: 100% (64/64)   \u001b[K\rremote: Counting objects: 100% (64/64), done.\u001b[K\n",
            "remote: Compressing objects: 100% (57/57), done.\u001b[K\n",
            "remote: Total 64 (delta 27), reused 15 (delta 4), pack-reused 0\u001b[K\n",
            "Unpacking objects: 100% (64/64), done.\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "ExecuteTime": {
          "end_time": "2019-05-10T03:00:03.192809Z",
          "start_time": "2019-05-10T03:00:01.649485Z"
        },
        "id": "WiZH9TXt2gcs",
        "colab_type": "code",
        "outputId": "5c34ff2a-0d9b-4234-9217-6e4d5919623d",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 87
        }
      },
      "source": [
        "\"\"\"BERT finetuning runner.\"\"\"\n",
        "from __future__ import absolute_import\n",
        "from __future__ import division\n",
        "from __future__ import print_function\n",
        "\n",
        "import collections\n",
        "import csv\n",
        "import os\n",
        "from bert import modeling\n",
        "from bert import optimization\n",
        "from bert import tokenization\n",
        "from bert import dataloader\n",
        "import tensorflow as tf\n",
        "from sklearn import metrics"
      ],
      "execution_count": 2,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "WARNING: Logging before flag parsing goes to stderr.\n",
            "W0725 13:05:37.254161 139888097109888 deprecation_wrapper.py:119] From /content/BERT-SLU/bert/optimization.py:84: The name tf.train.Optimizer is deprecated. Please use tf.compat.v1.train.Optimizer instead.\n",
            "\n"
          ],
          "name": "stderr"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Xh_2I89qRIYY",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# download pre-trained models\n",
        "# ! wget -P checkpoints https://storage.googleapis.com/bert_models/2018_11_23/multi_cased_L-12_H-768_A-12.zip\n",
        "# ! wget -P checkpoints https://storage.googleapis.com/bert_models/2018_11_03/multilingual_L-12_H-768_A-12.zip\n",
        "# ! wget -P checkpoints https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip \n",
        "# ! wget -P checkpoints https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-24_H-1024_A-16.zip\n",
        "# ! wget -P checkpoints https://storage.googleapis.com/bert_models/2018_10_18/cased_L-12_H-768_A-12.zip\n",
        "# ! wget -P checkpoints https://storage.googleapis.com/bert_models/2018_10_18/cased_L-24_H-1024_A-16.zip\n",
        "\n",
        "# ! unzip -o checkpoints/multi_cased_L-12_H-768_A-12.zip -d checkpoints\n",
        "# ! unzip -o checkpoints/multilingual_L-12_H-768_A-12.zip -d checkpoints\n",
        "# ! unzip -o checkpoints/uncased_L-12_H-768_A-12.zip -d checkpoints\n",
        "# ! unzip -o checkpoints/uncased_L-24_H-1024_A-16.zip -d checkpoints\n",
        "# ! unzip -o checkpoints/cased_L-12_H-768_A-12.zip -d checkpoints\n",
        "# ! unzip -o checkpoints/cased_L-24_H-1024_A-16.zip -d checkpoints\n",
        "\n",
        "# ! rm -rf *.zip"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "LxysA12lcaxW",
        "colab_type": "text"
      },
      "source": [
        "### Parameter Settings"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "sZk77YJ8Fzvl",
        "colab_type": "code",
        "cellView": "form",
        "colab": {}
      },
      "source": [
        "# Sequence and batch sizing for the BERT fine-tuning run.\n",
        "max_seq_length = 50 #@param {type:\"integer\"}\n",
        "train_batch_size = 32 #@param {type:\"integer\"}\n",
        "eval_batch_size = 32 #@param {type:\"integer\"}\n",
        "predict_batch_size = 32 #@param {type:\"integer\"}\n",
        "\n",
        "# Optimizer schedule and checkpoint/summary cadence.\n",
        "warmup_proportion = 0.1  # fraction of training steps used for LR warmup\n",
        "save_checkpoints_steps = 1000\n",
        "log_step_count_steps = 10\n",
        "save_summary_steps = 1\n",
        "\n",
        "learning_rate = 5e-5 #@param [\"5e-5\", \"3e-5\", \"2e-5\"] {type:\"raw\"}\n",
        "num_train_epochs = 1 #@param {type:\"integer\",min:1, max:10, step:1}\n",
        "do_train = True #@param [\"False\", \"True\"] {type:\"raw\"}\n",
        "do_eval = True #@param [\"False\", \"True\"] {type:\"raw\"}\n",
        "do_predict = True #@param [\"False\", \"True\"] {type:\"raw\"}\n",
        "\n",
        "# Paths: log_dir receives checkpoints/summaries; data_dir selects the\n",
        "# dataset; checkpoints points at an unzipped pre-trained BERT model\n",
        "# (see the download cell above).\n",
        "log_dir = 'log' #@param {type:\"string\"}\n",
        "data_dir = 'data/atis' #@param [\"data/atis\", \"data/snips\"]\n",
        "checkpoints = 'checkpoints/multi_cased_L-12_H-768_A-12' #@param [\"checkpoints/multi_cased_L-12_H-768_A-12\",\"checkpoints/cased_L-12_H-768_A-12\",\"checkpoints/cased_L-24_H-1024_A-16\",\"checkpoints/uncased_L-12_H-768_A-12\",\"checkpoints/uncased_L-24_H-1024_A-16\"]\n",
        "bert_config_file = os.path.join(checkpoints, 'bert_config.json')\n",
        "vocab_file = os.path.join(checkpoints, 'vocab.txt')\n",
        "init_checkpoint = os.path.join(checkpoints, 'bert_model.ckpt')\n",
        "\n",
        "# Model directories starting with 'u' are the 'uncased_...' BERT variants,\n",
        "# whose vocab requires lower-cased input text; cased variants must not\n",
        "# lower-case.\n",
        "if checkpoints.split('/')[1].startswith('u'):\n",
        "  do_lower_case = True\n",
        "else:\n",
        "  do_lower_case = False    "
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "i3suvDKrc9_o",
        "colab_type": "text"
      },
      "source": [
        "### Model Function"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "ExecuteTime": {
          "end_time": "2019-05-10T03:00:04.294358Z",
          "start_time": "2019-05-10T03:00:04.187995Z"
        },
        "id": "G7-9pHB02gdE",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n",
        "                 labels, num_labels):\n",
        "    \"\"\"Builds an intent-classification head on top of a BERT encoder.\n",
        "\n",
        "    Args:\n",
        "      bert_config: `modeling.BertConfig` describing the pre-trained model.\n",
        "      is_training: bool; enables dropout on the pooled output (and inside\n",
        "        BERT via `modeling.BertModel`).\n",
        "      input_ids: int Tensor of token ids, [batch_size, seq_length].\n",
        "      input_mask: int Tensor marking real tokens vs. padding.\n",
        "      segment_ids: int Tensor of segment/type ids.\n",
        "      labels: int Tensor of gold class ids, [batch_size].\n",
        "      num_labels: number of intent classes.\n",
        "\n",
        "    Returns:\n",
        "      (loss, logits, probabilities): scalar mean cross-entropy loss, plus\n",
        "      logits and softmax probabilities of shape [batch_size, num_labels].\n",
        "    \"\"\"\n",
        "    model = modeling.BertModel(\n",
        "        config=bert_config,\n",
        "        is_training=is_training,\n",
        "        input_ids=input_ids,\n",
        "        input_mask=input_mask,\n",
        "        token_type_ids=segment_ids)\n",
        "\n",
        "    # If you want to use the token-level output, use model.get_sequence_output()\n",
        "    # instead.\n",
        "    output_layer = model.get_pooled_output()\n",
        "\n",
        "    # TF1 idiom: .value extracts the static int from a Dimension.\n",
        "    hidden_size = output_layer.shape[-1].value\n",
        "\n",
        "    # Classifier weights, initialized the same way as BERT's own layers\n",
        "    # (truncated normal, stddev 0.02).\n",
        "    output_weights = tf.get_variable(\n",
        "        \"output_weights\", [num_labels, hidden_size],\n",
        "        initializer=tf.truncated_normal_initializer(stddev=0.02))\n",
        "\n",
        "    output_bias = tf.get_variable(\n",
        "        \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n",
        "\n",
        "    if is_training:\n",
        "        # I.e., 0.1 dropout\n",
        "        output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n",
        "\n",
        "    logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n",
        "    logits = tf.nn.bias_add(logits, output_bias)\n",
        "    probabilities = tf.nn.softmax(logits, axis=-1)\n",
        "    log_probs = tf.nn.log_softmax(logits, axis=-1)\n",
        "\n",
        "    one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n",
        "\n",
        "    # Cross-entropy against one-hot labels, averaged over the batch.\n",
        "    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n",
        "    loss = tf.reduce_mean(per_example_loss)\n",
        "\n",
        "    return (loss, logits, probabilities)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "ExecuteTime": {
          "end_time": "2019-05-10T03:00:04.404672Z",
          "start_time": "2019-05-10T03:00:04.296622Z"
        },
        "id": "z-RRG_AA2gdG",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps):\n",
        "    \"\"\"Returns a `model_fn` closure for `tf.estimator.Estimator`.\n",
        "\n",
        "    The Estimator API only passes (features, labels, mode, params) to\n",
        "    model_fn, so the BERT config, label count, warm-start checkpoint and\n",
        "    optimizer schedule are captured here via closure.\n",
        "    \"\"\"\n",
        "    def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument\n",
        "\n",
        "        tf.logging.info(\"*** Features ***\")\n",
        "        for name in sorted(features.keys()):\n",
        "            tf.logging.info(\"  name = %s, shape = %s\" % (name, features[name].shape))\n",
        "\n",
        "        input_ids = features[\"input_ids\"]\n",
        "        input_mask = features[\"input_mask\"]\n",
        "        segment_ids = features[\"segment_ids\"]\n",
        "        label_ids = features[\"label_ids\"]\n",
        "\n",
        "        # Per-example weight used by the accuracy metric below; defaults to\n",
        "        # all ones when the input pipeline does not supply it.\n",
        "        is_real_example = None\n",
        "        if \"is_real_example\" in features:\n",
        "            is_real_example = tf.cast(features[\"is_real_example\"], dtype=tf.float32)\n",
        "        else:\n",
        "            is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)\n",
        "\n",
        "        is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n",
        "\n",
        "        (total_loss, logits, probabilities) = create_model(\n",
        "            bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n",
        "            num_labels)\n",
        "\n",
        "        predicted_class = tf.argmax(logits, axis=-1, output_type=tf.int32)\n",
        "\n",
        "        # Streaming accuracy; [1] is the update op, logged to TensorBoard.\n",
        "        accuracy = tf.metrics.accuracy(labels=label_ids,\n",
        "                                       predictions=predicted_class,\n",
        "                                       weights=is_real_example, name=\"acc_op\")\n",
        "        tf.summary.scalar(\"accuracy\", accuracy[1])\n",
        "\n",
        "        # Warm-start every trainable variable that has a match in the\n",
        "        # pre-trained BERT checkpoint; the classifier head stays randomly\n",
        "        # initialized.\n",
        "        tvars = tf.trainable_variables()\n",
        "        initialized_variable_names = {}\n",
        "        if init_checkpoint:\n",
        "            (assignment_map, initialized_variable_names\n",
        "             ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n",
        "\n",
        "            tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n",
        "\n",
        "#         tf.logging.info(\"**** Trainable Variables ****\")\n",
        "#         for var in tvars:\n",
        "#             init_string = \"\"\n",
        "#             if var.name in initialized_variable_names:\n",
        "#                 init_string = \", *INIT_FROM_CKPT*\"\n",
        "#             tf.logging.info(\"  name = %s, shape = %s%s\", var.name, var.shape, init_string)\n",
        "\n",
        "        # Mode-specific spec: TRAIN attaches the optimizer, EVAL reports the\n",
        "        # accuracy metric, anything else (PREDICT) emits class predictions\n",
        "        # alongside the true labels.\n",
        "        output_spec = None\n",
        "        if mode == tf.estimator.ModeKeys.TRAIN:\n",
        "\n",
        "            train_op = optimization.create_optimizer(\n",
        "                total_loss, learning_rate, num_train_steps, num_warmup_steps)\n",
        "\n",
        "            output_spec = tf.estimator.EstimatorSpec(\n",
        "                mode=mode,\n",
        "                loss=total_loss,\n",
        "                train_op=train_op\n",
        "            )\n",
        "        elif mode == tf.estimator.ModeKeys.EVAL:\n",
        "\n",
        "            output_spec = tf.estimator.EstimatorSpec(\n",
        "                mode=mode,\n",
        "                loss=total_loss,\n",
        "                eval_metric_ops={\"accuracy\": accuracy}\n",
        "            )\n",
        "        else:\n",
        "            output_spec = tf.estimator.EstimatorSpec(\n",
        "                mode=mode,\n",
        "                predictions={\"predicted_class\": predicted_class,\n",
        "                             \"true_class\": label_ids}\n",
        "            )\n",
        "        return output_spec\n",
        "\n",
        "    return model_fn"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "ixu2NqJIdFbT",
        "colab_type": "text"
      },
      "source": [
        "### Train"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "ExecuteTime": {
          "end_time": "2019-05-10T03:15:45.304338Z",
          "start_time": "2019-05-10T03:00:04.407792Z"
        },
        "scrolled": true,
        "id": "HxnKgYie2gdH",
        "colab_type": "code",
        "outputId": "b549a0c1-19ad-43ae-9f5b-cbc5e6727c2f",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 1000
        }
      },
      "source": [
        "tf.logging.set_verbosity(tf.logging.INFO)\n",
        "# NOTE(review): DataProcessor, file_based_convert_examples_to_features and\n",
        "# file_based_input_fn_builder are defined in other notebook cells not shown\n",
        "# in this chunk (the run log references ipython-input cells 9-11) -- make\n",
        "# sure those cells are executed before this one.\n",
        "if not do_train and not do_eval and not do_predict:\n",
        "    raise ValueError(\n",
        "        \"At least one of `do_train`, `do_eval` or `do_predict' must be True.\")\n",
        "tf.gfile.MakeDirs(log_dir)\n",
        "processor = DataProcessor(data_dir)\n",
        "label_map, num_labels = processor.get_labels_info()\n",
        "# Sanity check: uncased checkpoints must be paired with do_lower_case=True.\n",
        "tokenization.validate_case_matches_checkpoint(do_lower_case, init_checkpoint)\n",
        "bert_config = modeling.BertConfig.from_json_file(bert_config_file)\n",
        "\n",
        "# BERT's position embeddings cap the usable sequence length.\n",
        "if max_seq_length > bert_config.max_position_embeddings:\n",
        "    raise ValueError(\"Cannot use sequence length %d because the BERT model \"\n",
        "                     \"was only trained up to sequence length %d\" %\n",
        "                     (max_seq_length, bert_config.max_position_embeddings))\n",
        "tokenizer = tokenization.FullTokenizer(\n",
        "    vocab_file=vocab_file, do_lower_case=do_lower_case)\n",
        "\n",
        "# Let the GPU allocator grow on demand instead of grabbing all memory.\n",
        "config = tf.ConfigProto()\n",
        "config.gpu_options.allow_growth = True\n",
        "run_config = tf.estimator.RunConfig(\n",
        "    model_dir=log_dir,\n",
        "    session_config=config,\n",
        "    save_checkpoints_steps=save_checkpoints_steps,\n",
        "    log_step_count_steps=log_step_count_steps,\n",
        "    save_summary_steps=save_summary_steps)\n",
        "train_examples = None\n",
        "num_train_steps = None\n",
        "num_warmup_steps = None\n",
        "\n",
        "# Derive the step budget from the dataset size and epoch count; warmup is\n",
        "# a fixed fraction of the total steps.\n",
        "if do_train:\n",
        "    train_examples = processor.get_train_examples()\n",
        "    num_train_steps = int(\n",
        "        len(train_examples) / train_batch_size * num_train_epochs)\n",
        "    num_warmup_steps = int(num_train_steps * warmup_proportion)\n",
        "\n",
        "model_fn = model_fn_builder(\n",
        "    bert_config=bert_config,\n",
        "    num_labels=num_labels,\n",
        "    init_checkpoint=init_checkpoint,\n",
        "    learning_rate=learning_rate,\n",
        "    num_train_steps=num_train_steps,\n",
        "    num_warmup_steps=num_warmup_steps)\n",
        "\n",
        "estimator = tf.estimator.Estimator(model_fn=model_fn, config=run_config)\n",
        "\n",
        "# Training\n",
        "if do_train:\n",
        "    # Serialize examples to a TFRecord file once, then stream from it.\n",
        "    train_file = os.path.join(log_dir, \"train.tf_record\")\n",
        "    file_based_convert_examples_to_features(\n",
        "        train_examples, label_map, max_seq_length, tokenizer, train_file)\n",
        "    tf.logging.info(\"***** Running training *****\")\n",
        "    tf.logging.info(\"  Num examples = %d\", len(train_examples))\n",
        "    tf.logging.info(\"  Batch size = %d\", train_batch_size)\n",
        "    tf.logging.info(\"  Num steps = %d\", num_train_steps)\n",
        "    train_input_fn = file_based_input_fn_builder(\n",
        "        input_file=train_file,\n",
        "        seq_length=max_seq_length,\n",
        "        is_training=True,\n",
        "        drop_remainder=False,\n",
        "        batch_size=train_batch_size)\n",
        "    estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)"
      ],
      "execution_count": 15,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "W0725 13:05:38.547155 139888097109888 deprecation_wrapper.py:119] From /content/BERT-SLU/bert/modeling.py:93: The name tf.gfile.GFile is deprecated. Please use tf.io.gfile.GFile instead.\n",
            "\n",
            "I0725 13:05:39.034579 139888097109888 estimator.py:209] Using config: {'_model_dir': 'log', '_tf_random_seed': None, '_save_summary_steps': 1, '_save_checkpoints_steps': 1000, '_save_checkpoints_secs': None, '_session_config': gpu_options {\n",
            "  allow_growth: true\n",
            "}\n",
            ", '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 10, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_experimental_max_worker_delay_secs': None, '_service': None, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7f39cae48940>, '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}\n",
            "W0725 13:05:39.036612 139888097109888 model_fn.py:630] Estimator's model_fn (<function model_fn_builder.<locals>.model_fn at 0x7f3a1930a6a8>) includes params argument, but params are not passed to Estimator.\n",
            "I0725 13:05:39.038886 139888097109888 <ipython-input-10-4818e34a2ae0>:9] Writing example 0 of 4478\n",
            "I0725 13:05:39.043929 139888097109888 <ipython-input-9-319d0dfaaa64>:63] *** Example ***\n",
            "I0725 13:05:39.045135 139888097109888 <ipython-input-9-319d0dfaaa64>:64] guid: train-0\n",
            "I0725 13:05:39.046404 139888097109888 <ipython-input-9-319d0dfaaa64>:66] tokens: [CLS] i want to fly from bal ##timo ##re to dalla ##s round trip [SEP]\n",
            "I0725 13:05:39.047575 139888097109888 <ipython-input-9-319d0dfaaa64>:68] input_ids: 101 177 21528 10114 26155 10188 20873 65258 10246 10114 11353 10107 13569 37307 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
            "I0725 13:05:39.048691 139888097109888 <ipython-input-9-319d0dfaaa64>:70] input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
            "I0725 13:05:39.050424 139888097109888 <ipython-input-9-319d0dfaaa64>:72] segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
            "I0725 13:05:39.051551 139888097109888 <ipython-input-9-319d0dfaaa64>:73] label: atis_flight (id = 11)\n",
            "I0725 13:05:39.054409 139888097109888 <ipython-input-9-319d0dfaaa64>:63] *** Example ***\n",
            "I0725 13:05:39.056105 139888097109888 <ipython-input-9-319d0dfaaa64>:64] guid: train-1\n",
            "I0725 13:05:39.057412 139888097109888 <ipython-input-9-319d0dfaaa64>:66] tokens: [CLS] round trip fare ##s from bal ##timo ##re to phi ##lade ##lp ##hia less than 1000 dollars round trip fare ##s from den ##ver to phi ##lade ##lp ##hia less than 1000 dollars round trip fare ##s from pit ##ts ##burgh to phi ##lade ##lp ##hia less than [SEP]\n",
            "I0725 13:05:39.059397 139888097109888 <ipython-input-9-319d0dfaaa64>:68] input_ids: 101 13569 37307 23252 10107 10188 20873 65258 10246 10114 36500 21805 35451 27919 15306 11084 12186 27953 13569 37307 23252 10107 10188 10140 12563 10114 36500 21805 35451 27919 15306 11084 12186 27953 13569 37307 23252 10107 10188 55277 10806 94202 10114 36500 21805 35451 27919 15306 11084 102\n",
            "I0725 13:05:39.060576 139888097109888 <ipython-input-9-319d0dfaaa64>:70] input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n",
            "I0725 13:05:39.062236 139888097109888 <ipython-input-9-319d0dfaaa64>:72] segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
            "I0725 13:05:39.064049 139888097109888 <ipython-input-9-319d0dfaaa64>:73] label: atis_airfare (id = 22)\n",
            "I0725 13:05:40.916762 139888097109888 <ipython-input-15-743e11141635>:51] ***** Running training *****\n",
            "I0725 13:05:40.917975 139888097109888 <ipython-input-15-743e11141635>:52]   Num examples = 4478\n",
            "I0725 13:05:40.919707 139888097109888 <ipython-input-15-743e11141635>:53]   Batch size = 32\n",
            "I0725 13:05:40.920739 139888097109888 <ipython-input-15-743e11141635>:54]   Num steps = 139\n",
            "W0725 13:05:40.946482 139888097109888 deprecation.py:323] From /usr/local/lib/python3.6/dist-packages/tensorflow/python/training/training_util.py:236: Variable.initialized_value (from tensorflow.python.ops.variables) is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "Use Variable.read_value. Variables in 2.X are initialized automatically both in eager and graph (inside tf.defun) contexts.\n",
            "W0725 13:05:40.987662 139888097109888 deprecation.py:323] From <ipython-input-11-a0b5cd77e850>:33: map_and_batch (from tensorflow.python.data.experimental.ops.batching) is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "Use `tf.data.Dataset.map(map_func, num_parallel_calls)` followed by `tf.data.Dataset.batch(batch_size, drop_remainder)`. Static tf.data optimizations will take care of using the fused implementation.\n",
            "I0725 13:05:41.014954 139888097109888 estimator.py:1145] Calling model_fn.\n",
            "I0725 13:05:41.016031 139888097109888 <ipython-input-14-64f48bfe5f20>:4] *** Features ***\n",
            "I0725 13:05:41.018064 139888097109888 <ipython-input-14-64f48bfe5f20>:6]   name = input_ids, shape = (?, 50)\n",
            "I0725 13:05:41.021644 139888097109888 <ipython-input-14-64f48bfe5f20>:6]   name = input_mask, shape = (?, 50)\n",
            "I0725 13:05:41.023739 139888097109888 <ipython-input-14-64f48bfe5f20>:6]   name = is_real_example, shape = (?,)\n",
            "I0725 13:05:41.026594 139888097109888 <ipython-input-14-64f48bfe5f20>:6]   name = label_ids, shape = (?,)\n",
            "I0725 13:05:41.028835 139888097109888 <ipython-input-14-64f48bfe5f20>:6]   name = segment_ids, shape = (?, 50)\n",
            "W0725 13:05:41.037174 139888097109888 deprecation_wrapper.py:119] From /content/BERT-SLU/bert/modeling.py:171: The name tf.variable_scope is deprecated. Please use tf.compat.v1.variable_scope instead.\n",
            "\n",
            "W0725 13:05:41.040536 139888097109888 deprecation_wrapper.py:119] From /content/BERT-SLU/bert/modeling.py:409: The name tf.get_variable is deprecated. Please use tf.compat.v1.get_variable instead.\n",
            "\n",
            "W0725 13:05:41.076908 139888097109888 deprecation_wrapper.py:119] From /content/BERT-SLU/bert/modeling.py:490: The name tf.assert_less_equal is deprecated. Please use tf.compat.v1.assert_less_equal instead.\n",
            "\n",
            "W0725 13:05:42.471842 139888097109888 lazy_loader.py:50] \n",
            "The TensorFlow contrib module will not be included in TensorFlow 2.0.\n",
            "For more information, please see:\n",
            "  * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md\n",
            "  * https://github.com/tensorflow/addons\n",
            "  * https://github.com/tensorflow/io (for I/O related ops)\n",
            "If you depend on functionality not listed there, please file an issue.\n",
            "\n",
            "W0725 13:05:42.495929 139888097109888 deprecation.py:506] From /content/BERT-SLU/bert/modeling.py:358: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n",
            "W0725 13:05:42.536851 139888097109888 deprecation.py:323] From /content/BERT-SLU/bert/modeling.py:671: dense (from tensorflow.python.layers.core) is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "Use keras.layers.dense instead.\n",
            "W0725 13:05:46.294146 139888097109888 deprecation_wrapper.py:119] From /content/BERT-SLU/bert/optimization.py:27: The name tf.train.get_or_create_global_step is deprecated. Please use tf.compat.v1.train.get_or_create_global_step instead.\n",
            "\n",
            "W0725 13:05:46.296745 139888097109888 deprecation_wrapper.py:119] From /content/BERT-SLU/bert/optimization.py:32: The name tf.train.polynomial_decay is deprecated. Please use tf.compat.v1.train.polynomial_decay instead.\n",
            "\n",
            "W0725 13:05:46.307875 139888097109888 deprecation.py:323] From /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/optimizer_v2/learning_rate_schedule.py:409: div (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "Deprecated in favor of operator or tf.math.divide.\n",
            "W0725 13:05:46.323178 139888097109888 deprecation_wrapper.py:119] From /content/BERT-SLU/bert/optimization.py:67: The name tf.trainable_variables is deprecated. Please use tf.compat.v1.trainable_variables instead.\n",
            "\n",
            "W0725 13:05:46.735526 139888097109888 deprecation.py:323] From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_grad.py:1205: add_dispatch_support.<locals>.wrapper (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "Use tf.where in 2.0, which has the same broadcast rule as np.where\n",
            "I0725 13:05:55.891367 139888097109888 estimator.py:1147] Done calling model_fn.\n",
            "I0725 13:05:55.894071 139888097109888 basic_session_run_hooks.py:541] Create CheckpointSaverHook.\n",
            "I0725 13:05:59.246504 139888097109888 monitored_session.py:240] Graph was finalized.\n",
            "I0725 13:06:16.237473 139888097109888 session_manager.py:500] Running local_init_op.\n",
            "I0725 13:06:16.420181 139888097109888 session_manager.py:502] Done running local_init_op.\n",
            "I0725 13:06:24.423384 139888097109888 basic_session_run_hooks.py:606] Saving checkpoints for 0 into log/model.ckpt.\n",
            "I0725 13:06:50.049423 139888097109888 basic_session_run_hooks.py:262] loss = 3.264139, step = 0\n",
            "I0725 13:07:01.194835 139888097109888 basic_session_run_hooks.py:692] global_step/sec: 0.897175\n",
            "I0725 13:07:01.200052 139888097109888 basic_session_run_hooks.py:260] loss = 1.229816, step = 10 (11.151 sec)\n",
            "I0725 13:07:05.430270 139888097109888 basic_session_run_hooks.py:692] global_step/sec: 2.36103\n",
            "I0725 13:07:05.432808 139888097109888 basic_session_run_hooks.py:260] loss = 1.0138252, step = 20 (4.233 sec)\n",
            "I0725 13:07:09.674729 139888097109888 basic_session_run_hooks.py:692] global_step/sec: 2.35603\n",
            "I0725 13:07:09.678979 139888097109888 basic_session_run_hooks.py:260] loss = 0.35862795, step = 30 (4.246 sec)\n",
            "I0725 13:07:13.944977 139888097109888 basic_session_run_hooks.py:692] global_step/sec: 2.34177\n",
            "I0725 13:07:13.949571 139888097109888 basic_session_run_hooks.py:260] loss = 0.5629216, step = 40 (4.271 sec)\n",
            "I0725 13:07:18.226231 139888097109888 basic_session_run_hooks.py:692] global_step/sec: 2.33577\n",
            "I0725 13:07:18.231100 139888097109888 basic_session_run_hooks.py:260] loss = 0.93975425, step = 50 (4.282 sec)\n",
            "I0725 13:07:22.521363 139888097109888 basic_session_run_hooks.py:692] global_step/sec: 2.32823\n",
            "I0725 13:07:22.526640 139888097109888 basic_session_run_hooks.py:260] loss = 1.3346605, step = 60 (4.296 sec)\n",
            "I0725 13:07:26.828617 139888097109888 basic_session_run_hooks.py:692] global_step/sec: 2.32165\n",
            "I0725 13:07:26.830948 139888097109888 basic_session_run_hooks.py:260] loss = 0.6680515, step = 70 (4.304 sec)\n",
            "I0725 13:07:31.142333 139888097109888 basic_session_run_hooks.py:692] global_step/sec: 2.31821\n",
            "I0725 13:07:31.147507 139888097109888 basic_session_run_hooks.py:260] loss = 0.57802784, step = 80 (4.317 sec)\n",
            "I0725 13:07:35.469558 139888097109888 basic_session_run_hooks.py:692] global_step/sec: 2.31094\n",
            "I0725 13:07:35.474174 139888097109888 basic_session_run_hooks.py:260] loss = 0.5304872, step = 90 (4.327 sec)\n",
            "I0725 13:07:39.811920 139888097109888 basic_session_run_hooks.py:692] global_step/sec: 2.30289\n",
            "I0725 13:07:39.814986 139888097109888 basic_session_run_hooks.py:260] loss = 0.53908944, step = 100 (4.341 sec)\n",
            "I0725 13:07:44.160015 139888097109888 basic_session_run_hooks.py:692] global_step/sec: 2.29985\n",
            "I0725 13:07:44.162223 139888097109888 basic_session_run_hooks.py:260] loss = 0.61714983, step = 110 (4.347 sec)\n",
            "I0725 13:07:48.523492 139888097109888 basic_session_run_hooks.py:692] global_step/sec: 2.29177\n",
            "I0725 13:07:48.532045 139888097109888 basic_session_run_hooks.py:260] loss = 0.35656506, step = 120 (4.370 sec)\n",
            "I0725 13:07:52.916771 139888097109888 basic_session_run_hooks.py:692] global_step/sec: 2.27619\n",
            "I0725 13:07:52.922657 139888097109888 basic_session_run_hooks.py:260] loss = 0.70242697, step = 130 (4.391 sec)\n",
            "I0725 13:07:56.437977 139888097109888 basic_session_run_hooks.py:606] Saving checkpoints for 139 into log/model.ckpt.\n",
            "I0725 13:08:11.694936 139888097109888 estimator.py:368] Loss for final step: 0.3666439.\n"
          ],
          "name": "stderr"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "pYRHXA2cdKyk",
        "colab_type": "text"
      },
      "source": [
        "### Evaluate"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "ExecuteTime": {
          "end_time": "2019-05-10T03:15:45.309668Z",
          "start_time": "2019-05-10T03:00:04.197Z"
        },
        "id": "9m6A-f2V2gdL",
        "colab_type": "code",
        "outputId": "91d700c5-b618-45da-de50-871f2971f342",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 726
        }
      },
      "source": [
        "# Evaluate the fine-tuned intent classifier on the dev set and persist metrics.\n",
        "if do_eval:\n",
        "    # Dev examples come from the processor defined in an earlier cell;\n",
        "    # num_actual tracks the count before any batch padding would be added.\n",
        "    eval_examples = processor.get_dev_examples()\n",
        "    num_actual_eval_examples = len(eval_examples)\n",
        "    # Serialize examples to a TFRecord file that the input_fn will read back.\n",
        "    eval_file = os.path.join(log_dir, \"eval.tf_record\")\n",
        "    file_based_convert_examples_to_features(\n",
        "        eval_examples, label_map, max_seq_length, tokenizer, eval_file)\n",
        "    tf.logging.info(\"***** Running evaluation *****\")\n",
        "    # padding count is len - actual; zero here since no padding was appended.\n",
        "    tf.logging.info(\"  Num examples = %d (%d actual, %d padding)\",\n",
        "                    len(eval_examples), num_actual_eval_examples,\n",
        "                    len(eval_examples) - num_actual_eval_examples)\n",
        "    tf.logging.info(\"  Batch size = %d\", eval_batch_size)\n",
        "\n",
        "    # Build the eval input_fn: no shuffling (is_training=False) and keep the\n",
        "    # final partial batch (drop_remainder=False) so every example is scored.\n",
        "    eval_input_fn = file_based_input_fn_builder(\n",
        "        input_file=eval_file,\n",
        "        seq_length=max_seq_length,\n",
        "        is_training=False,\n",
        "        drop_remainder=False,\n",
        "        batch_size=eval_batch_size)\n",
        "\n",
        "    # Runs the Estimator eval loop; returns a dict of metric name -> value.\n",
        "    result = estimator.evaluate(input_fn=eval_input_fn)\n",
        "\n",
        "    # Log each metric (sorted by key for stable ordering) and mirror it to\n",
        "    # eval_results.txt in the log directory.\n",
        "    output_eval_file = os.path.join(log_dir, \"eval_results.txt\")\n",
        "    with tf.gfile.GFile(output_eval_file, \"w\") as writer:\n",
        "        tf.logging.info(\"***** Eval results *****\")\n",
        "        for key in sorted(result.keys()):\n",
        "            tf.logging.info(\"  %s = %s\", key, str(result[key]))\n",
        "            writer.write(\"%s = %s\\n\" % (key, str(result[key])))"
      ],
      "execution_count": 16,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "I0725 13:08:11.726988 139888097109888 <ipython-input-10-4818e34a2ae0>:9] Writing example 0 of 500\n",
            "I0725 13:08:11.728673 139888097109888 <ipython-input-9-319d0dfaaa64>:63] *** Example ***\n",
            "I0725 13:08:11.729657 139888097109888 <ipython-input-9-319d0dfaaa64>:64] guid: dev-0\n",
            "I0725 13:08:11.731560 139888097109888 <ipython-input-9-319d0dfaaa64>:66] tokens: [CLS] i want to fly from bost ##on at 838 am and arrive in den ##ver at 1110 in the morning [SEP]\n",
            "I0725 13:08:11.733252 139888097109888 <ipython-input-9-319d0dfaaa64>:68] input_ids: 101 177 21528 10114 26155 10188 29495 10263 10160 82665 10392 10111 27814 10106 10140 12563 10160 106270 10106 10105 28757 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
            "I0725 13:08:11.735260 139888097109888 <ipython-input-9-319d0dfaaa64>:70] input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
            "I0725 13:08:11.737380 139888097109888 <ipython-input-9-319d0dfaaa64>:72] segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
            "I0725 13:08:11.738716 139888097109888 <ipython-input-9-319d0dfaaa64>:73] label: atis_flight (id = 11)\n",
            "I0725 13:08:11.741641 139888097109888 <ipython-input-9-319d0dfaaa64>:63] *** Example ***\n",
            "I0725 13:08:11.743603 139888097109888 <ipython-input-9-319d0dfaaa64>:64] guid: dev-1\n",
            "I0725 13:08:11.744936 139888097109888 <ipython-input-9-319d0dfaaa64>:66] tokens: [CLS] show me all round trip flights between hou ##ston and las veg ##as [SEP]\n",
            "I0725 13:08:11.746854 139888097109888 <ipython-input-9-319d0dfaaa64>:68] input_ids: 101 11897 10911 10435 13569 37307 55650 10948 109601 21884 10111 10285 108193 10403 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
            "I0725 13:08:11.748463 139888097109888 <ipython-input-9-319d0dfaaa64>:70] input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
            "I0725 13:08:11.750380 139888097109888 <ipython-input-9-319d0dfaaa64>:72] segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
            "I0725 13:08:11.751940 139888097109888 <ipython-input-9-319d0dfaaa64>:73] label: atis_flight (id = 11)\n",
            "I0725 13:08:11.970150 139888097109888 <ipython-input-16-2cefe2c006b1>:7] ***** Running evaluation *****\n",
            "I0725 13:08:11.971163 139888097109888 <ipython-input-16-2cefe2c006b1>:10]   Num examples = 500 (500 actual, 0 padding)\n",
            "I0725 13:08:11.971967 139888097109888 <ipython-input-16-2cefe2c006b1>:11]   Batch size = 32\n",
            "I0725 13:08:12.013962 139888097109888 estimator.py:1145] Calling model_fn.\n",
            "I0725 13:08:12.014881 139888097109888 <ipython-input-14-64f48bfe5f20>:4] *** Features ***\n",
            "I0725 13:08:12.015876 139888097109888 <ipython-input-14-64f48bfe5f20>:6]   name = input_ids, shape = (?, 50)\n",
            "I0725 13:08:12.018014 139888097109888 <ipython-input-14-64f48bfe5f20>:6]   name = input_mask, shape = (?, 50)\n",
            "I0725 13:08:12.020537 139888097109888 <ipython-input-14-64f48bfe5f20>:6]   name = is_real_example, shape = (?,)\n",
            "I0725 13:08:12.021756 139888097109888 <ipython-input-14-64f48bfe5f20>:6]   name = label_ids, shape = (?,)\n",
            "I0725 13:08:12.022915 139888097109888 <ipython-input-14-64f48bfe5f20>:6]   name = segment_ids, shape = (?, 50)\n",
            "I0725 13:08:15.414391 139888097109888 estimator.py:1147] Done calling model_fn.\n",
            "I0725 13:08:15.443503 139888097109888 evaluation.py:255] Starting evaluation at 2019-07-25T13:08:15Z\n",
            "I0725 13:08:15.974504 139888097109888 monitored_session.py:240] Graph was finalized.\n",
            "W0725 13:08:15.982285 139888097109888 deprecation.py:323] From /usr/local/lib/python3.6/dist-packages/tensorflow/python/training/saver.py:1276: checkpoint_exists (from tensorflow.python.training.checkpoint_management) is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "Use standard file APIs to check for files with this prefix.\n",
            "I0725 13:08:15.991008 139888097109888 saver.py:1280] Restoring parameters from log/model.ckpt-139\n",
            "I0725 13:08:16.857399 139888097109888 session_manager.py:500] Running local_init_op.\n",
            "I0725 13:08:16.919347 139888097109888 session_manager.py:502] Done running local_init_op.\n",
            "I0725 13:08:19.650732 139888097109888 evaluation.py:275] Finished evaluation at 2019-07-25-13:08:19\n",
            "I0725 13:08:19.652044 139888097109888 estimator.py:2039] Saving dict for global step 139: accuracy = 0.802, global_step = 139, loss = 0.6847833\n",
            "I0725 13:08:20.195577 139888097109888 estimator.py:2099] Saving 'checkpoint_path' summary for global step 139: log/model.ckpt-139\n",
            "I0725 13:08:20.197674 139888097109888 <ipython-input-16-2cefe2c006b1>:24] ***** Eval results *****\n",
            "I0725 13:08:20.200990 139888097109888 <ipython-input-16-2cefe2c006b1>:26]   accuracy = 0.802\n",
            "I0725 13:08:20.203057 139888097109888 <ipython-input-16-2cefe2c006b1>:26]   global_step = 139\n",
            "I0725 13:08:20.204864 139888097109888 <ipython-input-16-2cefe2c006b1>:26]   loss = 0.6847833\n"
          ],
          "name": "stderr"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "pKalVYoAdOno",
        "colab_type": "text"
      },
      "source": [
        "### Predict"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "ExecuteTime": {
          "end_time": "2019-05-10T03:15:45.310973Z",
          "start_time": "2019-05-10T03:00:04.199Z"
        },
        "id": "8MCEcX892gdM",
        "colab_type": "code",
        "outputId": "165498e7-9412-4b81-86e9-ce6e02e22f79",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 826
        }
      },
      "source": [
        "# Run inference on the test set, dump misclassified examples to a TSV file,\n",
        "# and report overall accuracy plus a per-class classification report.\n",
        "if do_predict:\n",
        "\n",
        "    predict_examples = processor.get_test_examples()\n",
        "    num_actual_predict_examples = len(predict_examples)\n",
        "\n",
        "    # Reuse the serialized TFRecord from a previous run if it already exists.\n",
        "    predict_file = os.path.join(log_dir, \"predict.tf_record\")\n",
        "    if not tf.gfile.Exists(predict_file):\n",
        "        file_based_convert_examples_to_features(predict_examples, label_map,\n",
        "                                                max_seq_length, tokenizer,\n",
        "                                                predict_file)\n",
        "\n",
        "    tf.logging.info(\"***** Running prediction*****\")\n",
        "    tf.logging.info(\"  Num examples = %d (%d actual, %d padding)\",\n",
        "                    len(predict_examples), num_actual_predict_examples,\n",
        "                    len(predict_examples) - num_actual_predict_examples)\n",
        "    tf.logging.info(\"  Batch size = %d\", predict_batch_size)\n",
        "\n",
        "    # No shuffling and keep the final partial batch so every example is scored.\n",
        "    predict_input_fn = file_based_input_fn_builder(\n",
        "        input_file=predict_file,\n",
        "        seq_length=max_seq_length,\n",
        "        is_training=False,\n",
        "        drop_remainder=False,\n",
        "        batch_size=predict_batch_size)\n",
        "\n",
        "    # estimator.predict yields one dict per example (lazily, as a generator).\n",
        "    result = estimator.predict(input_fn=predict_input_fn)\n",
        "    # Invert label_map (name -> id) so numeric ids can be shown as label names.\n",
        "    label_map_new = {v: k for k, v in label_map.items()}\n",
        "\n",
        "    # Only misclassified examples are written, one per line. Tab-separated to\n",
        "    # match the .tsv extension (was comma-separated before).\n",
        "    output_predict_file = os.path.join(log_dir, \"test_results.tsv\")\n",
        "    with tf.gfile.GFile(output_predict_file, \"w\") as writer:\n",
        "        writer.write(\"line\\ttrue\\tpredict\\n\")\n",
        "        tf.logging.info(\"***** Predict results *****\")\n",
        "\n",
        "        true_classes = []\n",
        "        predicted_classes = []\n",
        "\n",
        "        for i, item in enumerate(result):\n",
        "            true_class = item[\"true_class\"]\n",
        "            predicted_class = item[\"predicted_class\"]\n",
        "\n",
        "            true_classes.append(true_class)\n",
        "            predicted_classes.append(predicted_class)\n",
        "\n",
        "            if predicted_class != true_class:\n",
        "                output_line = \"{}\\t{}\\t{}\\n\".format(\n",
        "                    i + 1, label_map_new[true_class],\n",
        "                    label_map_new[predicted_class])\n",
        "                writer.write(output_line)\n",
        "\n",
        "    accuracy = metrics.accuracy_score(true_classes, predicted_classes)\n",
        "    classification_report = metrics.classification_report(true_classes, predicted_classes)\n",
        "    tf.logging.info(\"Accuracy: %s\", accuracy)\n",
        "    # Fixed label: this logs the per-class report, not the accuracy.\n",
        "    tf.logging.info(\"\\nClassification report:\\n%s\", classification_report)"
      ],
      "execution_count": 19,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "I0725 13:31:20.848347 139888097109888 <ipython-input-19-d27651733b67>:12] ***** Running prediction*****\n",
            "I0725 13:31:20.851618 139888097109888 <ipython-input-19-d27651733b67>:15]   Num examples = 893 (893 actual, 0 padding)\n",
            "I0725 13:31:20.852965 139888097109888 <ipython-input-19-d27651733b67>:16]   Batch size = 32\n",
            "I0725 13:31:20.856503 139888097109888 <ipython-input-19-d27651733b67>:32] ***** Predict results *****\n",
            "I0725 13:31:20.900712 139888097109888 estimator.py:1145] Calling model_fn.\n",
            "I0725 13:31:20.901857 139888097109888 <ipython-input-14-64f48bfe5f20>:4] *** Features ***\n",
            "I0725 13:31:20.905015 139888097109888 <ipython-input-14-64f48bfe5f20>:6]   name = input_ids, shape = (?, 50)\n",
            "I0725 13:31:20.907747 139888097109888 <ipython-input-14-64f48bfe5f20>:6]   name = input_mask, shape = (?, 50)\n",
            "I0725 13:31:20.912290 139888097109888 <ipython-input-14-64f48bfe5f20>:6]   name = is_real_example, shape = (?,)\n",
            "I0725 13:31:20.913725 139888097109888 <ipython-input-14-64f48bfe5f20>:6]   name = label_ids, shape = (?,)\n",
            "I0725 13:31:20.915978 139888097109888 <ipython-input-14-64f48bfe5f20>:6]   name = segment_ids, shape = (?, 50)\n",
            "I0725 13:31:24.134561 139888097109888 estimator.py:1147] Done calling model_fn.\n",
            "I0725 13:31:24.644235 139888097109888 monitored_session.py:240] Graph was finalized.\n",
            "I0725 13:31:24.656652 139888097109888 saver.py:1280] Restoring parameters from log/model.ckpt-139\n",
            "I0725 13:31:25.566498 139888097109888 session_manager.py:500] Running local_init_op.\n",
            "I0725 13:31:25.628158 139888097109888 session_manager.py:502] Done running local_init_op.\n",
            "/usr/local/lib/python3.6/dist-packages/sklearn/metrics/classification.py:1437: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples.\n",
            "  'precision', 'predicted', average, warn_for)\n",
            "I0725 13:31:29.437628 139888097109888 <ipython-input-19-d27651733b67>:50] Accuracy: 0.7782754759238522\n",
            "I0725 13:31:29.438534 139888097109888 <ipython-input-19-d27651733b67>:51] \n",
            "Accuracy:               precision    recall  f1-score   support\n",
            "\n",
            "           1       0.00      0.00      0.00         3\n",
            "           2       0.00      0.00      0.00         6\n",
            "           4       0.21      0.92      0.34        36\n",
            "           5       0.00      0.00      0.00         7\n",
            "           6       0.00      0.00      0.00         1\n",
            "           7       0.00      0.00      0.00         1\n",
            "           8       0.00      0.00      0.00         8\n",
            "           9       0.00      0.00      0.00         1\n",
            "          10       0.00      0.00      0.00        12\n",
            "          11       0.98      0.97      0.98       632\n",
            "          12       0.00      0.00      0.00        10\n",
            "          13       0.00      0.00      0.00         2\n",
            "          14       0.00      0.00      0.00         6\n",
            "          16       0.00      0.00      0.00        21\n",
            "          17       0.00      0.00      0.00        18\n",
            "          19       0.00      0.00      0.00        38\n",
            "          21       0.00      0.00      0.00         1\n",
            "          22       0.44      0.98      0.61        48\n",
            "          24       0.00      0.00      0.00         9\n",
            "          25       0.00      0.00      0.00        33\n",
            "\n",
            "    accuracy                           0.78       893\n",
            "   macro avg       0.08      0.14      0.10       893\n",
            "weighted avg       0.73      0.78      0.74       893\n",
            "\n"
          ],
          "name": "stderr"
        }
      ]
    }
  ]
}