{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# !wget https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip\n",
    "# !unzip uncased_L-12_H-768_A-12.zip\n",
    "# !wget https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json\n",
    "# !wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "class SquadExample(object):\n",
    "  \"\"\"A single training/test example for simple sequence classification.\n",
    "     For examples without an answer, the start and end position are -1.\n",
    "  \"\"\"\n",
    "\n",
    "  def __init__(self,\n",
    "               qas_id,\n",
    "               question_text,\n",
    "               doc_tokens,\n",
    "               orig_answer_text=None,\n",
    "               start_position=None,\n",
    "               end_position=None,\n",
    "               is_impossible=False):\n",
    "    self.qas_id = qas_id\n",
    "    self.question_text = question_text\n",
    "    self.doc_tokens = doc_tokens\n",
    "    self.orig_answer_text = orig_answer_text\n",
    "    self.start_position = start_position\n",
    "    self.end_position = end_position\n",
    "    self.is_impossible = is_impossible\n",
    "\n",
    "  def __str__(self):\n",
    "    return self.__repr__()\n",
    "\n",
    "  def __repr__(self):\n",
    "    s = \"\"\n",
    "    s += \"qas_id: %s\" % (tokenization.printable_text(self.qas_id))\n",
    "    s += \", question_text: %s\" % (\n",
    "        tokenization.printable_text(self.question_text))\n",
    "    s += \", doc_tokens: [%s]\" % (\" \".join(self.doc_tokens))\n",
    "    if self.start_position:\n",
    "      s += \", start_position: %d\" % (self.start_position)\n",
    "    if self.start_position:\n",
    "      s += \", end_position: %d\" % (self.end_position)\n",
    "    if self.start_position:\n",
    "      s += \", is_impossible: %r\" % (self.is_impossible)\n",
    "    return s"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 96,
   "metadata": {},
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "import bert\n",
    "from bert import run_classifier\n",
    "from bert import optimization\n",
    "from bert import tokenization\n",
    "from bert import modeling\n",
    "from tqdm import tqdm\n",
    "import json\n",
    "import math\n",
    "\n",
    "BERT_VOCAB = 'uncased_L-12_H-768_A-12/vocab.txt'\n",
    "BERT_INIT_CHKPNT = 'uncased_L-12_H-768_A-12/bert_model.ckpt'\n",
    "BERT_CONFIG = 'uncased_L-12_H-768_A-12/bert_config.json'\n",
    "\n",
    "tokenization.validate_case_matches_checkpoint(True,BERT_INIT_CHKPNT)\n",
    "tokenizer = tokenization.FullTokenizer(\n",
    "      vocab_file=BERT_VOCAB, do_lower_case=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 80,
   "metadata": {},
   "outputs": [],
   "source": [
    "version_2_with_negative = False\n",
    "\n",
    "def read_squad_examples(input_file, is_training, version_2_with_negative = False):\n",
    "    \"\"\"Read a SQuAD json file into a list of SquadExample.\"\"\"\n",
    "    with tf.gfile.Open(input_file, 'r') as reader:\n",
    "        input_data = json.load(reader)['data']\n",
    "\n",
    "    def is_whitespace(c):\n",
    "        if c == ' ' or c == '\\t' or c == '\\r' or c == '\\n' or ord(c) == 0x202F:\n",
    "            return True\n",
    "        return False\n",
    "\n",
    "    examples = []\n",
    "    for entry in input_data:\n",
    "        for paragraph in entry['paragraphs']:\n",
    "            paragraph_text = paragraph['context']\n",
    "            doc_tokens = []\n",
    "            char_to_word_offset = []\n",
    "            prev_is_whitespace = True\n",
    "            for c in paragraph_text:\n",
    "                if is_whitespace(c):\n",
    "                    prev_is_whitespace = True\n",
    "                else:\n",
    "                    if prev_is_whitespace:\n",
    "                        doc_tokens.append(c)\n",
    "                    else:\n",
    "                        doc_tokens[-1] += c\n",
    "                    prev_is_whitespace = False\n",
    "                char_to_word_offset.append(len(doc_tokens) - 1)\n",
    "\n",
    "            for qa in paragraph['qas']:\n",
    "                qas_id = qa['id']\n",
    "                question_text = qa['question']\n",
    "                start_position = None\n",
    "                end_position = None\n",
    "                orig_answer_text = None\n",
    "                is_impossible = False\n",
    "                if is_training:\n",
    "\n",
    "                    if version_2_with_negative:\n",
    "                        is_impossible = qa['is_impossible']\n",
    "                    if (len(qa['answers']) != 1) and (not is_impossible):\n",
    "                        raise ValueError(\n",
    "                            'For training, each question should have exactly 1 answer.'\n",
    "                        )\n",
    "                    if not is_impossible:\n",
    "                        answer = qa['answers'][0]\n",
    "                        orig_answer_text = answer['text']\n",
    "                        answer_offset = answer['answer_start']\n",
    "                        answer_length = len(orig_answer_text)\n",
    "                        start_position = char_to_word_offset[answer_offset]\n",
    "                        end_position = char_to_word_offset[\n",
    "                            answer_offset + answer_length - 1\n",
    "                        ]\n",
    "                        actual_text = ' '.join(\n",
    "                            doc_tokens[start_position : (end_position + 1)]\n",
    "                        )\n",
    "                        cleaned_answer_text = ' '.join(\n",
    "                            tokenization.whitespace_tokenize(orig_answer_text)\n",
    "                        )\n",
    "                        if actual_text.find(cleaned_answer_text) == -1:\n",
    "                            tf.logging.warning(\n",
    "                                \"Could not find answer: '%s' vs. '%s'\",\n",
    "                                actual_text,\n",
    "                                cleaned_answer_text,\n",
    "                            )\n",
    "                            continue\n",
    "                    else:\n",
    "                        start_position = -1\n",
    "                        end_position = -1\n",
    "                        orig_answer_text = ''\n",
    "\n",
    "                example = SquadExample(\n",
    "                    qas_id = qas_id,\n",
    "                    question_text = question_text,\n",
    "                    doc_tokens = doc_tokens,\n",
    "                    orig_answer_text = orig_answer_text,\n",
    "                    start_position = start_position,\n",
    "                    end_position = end_position,\n",
    "                    is_impossible = is_impossible,\n",
    "                )\n",
    "                examples.append(example)\n",
    "\n",
    "    return examples"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "squad_train = read_squad_examples('train-v1.1.json', True)\n",
    "squad_test = read_squad_examples('dev-v1.1.json', False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 174,
   "metadata": {},
   "outputs": [],
   "source": [
    "import six\n",
    "\n",
    "def _improve_answer_span(\n",
    "    doc_tokens, input_start, input_end, tokenizer, orig_answer_text\n",
    "):\n",
    "    \"\"\"Returns tokenized answer spans that better match the annotated answer.\"\"\"\n",
    "\n",
    "    # The SQuAD annotations are character based. We first project them to\n",
    "    # whitespace-tokenized words. But then after WordPiece tokenization, we can\n",
    "    # often find a \"better match\". For example:\n",
    "    #\n",
    "    #   Question: What year was John Smith born?\n",
    "    #   Context: The leader was John Smith (1895-1943).\n",
    "    #   Answer: 1895\n",
    "    #\n",
    "    # The original whitespace-tokenized answer will be \"(1895-1943).\". However\n",
    "    # after tokenization, our tokens will be \"( 1895 - 1943 ) .\". So we can match\n",
    "    # the exact answer, 1895.\n",
    "    #\n",
    "    # However, this is not always possible. Consider the following:\n",
    "    #\n",
    "    #   Question: What country is the top exporter of electornics?\n",
    "    #   Context: The Japanese electronics industry is the lagest in the world.\n",
    "    #   Answer: Japan\n",
    "    #\n",
    "    # In this case, the annotator chose \"Japan\" as a character sub-span of\n",
    "    # the word \"Japanese\". Since our WordPiece tokenizer does not split\n",
    "    # \"Japanese\", we just use \"Japanese\" as the annotation. This is fairly rare\n",
    "    # in SQuAD, but does happen.\n",
    "    tok_answer_text = ' '.join(tokenizer.tokenize(orig_answer_text))\n",
    "\n",
    "    for new_start in range(input_start, input_end + 1):\n",
    "        for new_end in range(input_end, new_start - 1, -1):\n",
    "            text_span = ' '.join(doc_tokens[new_start : (new_end + 1)])\n",
    "            if text_span == tok_answer_text:\n",
    "                return (new_start, new_end)\n",
    "\n",
    "    return (input_start, input_end)\n",
    "\n",
    "\n",
    "def _check_is_max_context(doc_spans, cur_span_index, position):\n",
    "    \"\"\"Check if this is the 'max context' doc span for the token.\"\"\"\n",
    "\n",
    "    # Because of the sliding window approach taken to scoring documents, a single\n",
    "    # token can appear in multiple documents. E.g.\n",
    "    #  Doc: the man went to the store and bought a gallon of milk\n",
    "    #  Span A: the man went to the\n",
    "    #  Span B: to the store and bought\n",
    "    #  Span C: and bought a gallon of\n",
    "    #  ...\n",
    "    #\n",
    "    # Now the word 'bought' will have two scores from spans B and C. We only\n",
    "    # want to consider the score with \"maximum context\", which we define as\n",
    "    # the *minimum* of its left and right context (the *sum* of left and\n",
    "    # right context will always be the same, of course).\n",
    "    #\n",
    "    # In the example the maximum context for 'bought' would be span C since\n",
    "    # it has 1 left context and 3 right context, while span B has 4 left context\n",
    "    # and 0 right context.\n",
    "    best_score = None\n",
    "    best_span_index = None\n",
    "    for (span_index, doc_span) in enumerate(doc_spans):\n",
    "        end = doc_span.start + doc_span.length - 1\n",
    "        if position < doc_span.start:\n",
    "            continue\n",
    "        if position > end:\n",
    "            continue\n",
    "        num_left_context = position - doc_span.start\n",
    "        num_right_context = end - position\n",
    "        score = (\n",
    "            min(num_left_context, num_right_context) + 0.01 * doc_span.length\n",
    "        )\n",
    "        if best_score is None or score > best_score:\n",
    "            best_score = score\n",
    "            best_span_index = span_index\n",
    "\n",
    "    return cur_span_index == best_span_index\n",
    "\n",
    "def get_final_text(pred_text, orig_text, do_lower_case):\n",
    "    \"\"\"Project the tokenized prediction back to the original text.\"\"\"\n",
    "\n",
    "    # When we created the data, we kept track of the alignment between original\n",
    "    # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So\n",
    "    # now `orig_text` contains the span of our original text corresponding to the\n",
    "    # span that we predicted.\n",
    "    #\n",
    "    # However, `orig_text` may contain extra characters that we don't want in\n",
    "    # our prediction.\n",
    "    #\n",
    "    # For example, let's say:\n",
    "    #   pred_text = steve smith\n",
    "    #   orig_text = Steve Smith's\n",
    "    #\n",
    "    # We don't want to return `orig_text` because it contains the extra \"'s\".\n",
    "    #\n",
    "    # We don't want to return `pred_text` because it's already been normalized\n",
    "    # (the SQuAD eval script also does punctuation stripping/lower casing but\n",
    "    # our tokenizer does additional normalization like stripping accent\n",
    "    # characters).\n",
    "    #\n",
    "    # What we really want to return is \"Steve Smith\".\n",
    "    #\n",
    "    # Therefore, we have to apply a semi-complicated alignment heruistic between\n",
    "    # `pred_text` and `orig_text` to get a character-to-charcter alignment. This\n",
    "    # can fail in certain cases in which case we just return `orig_text`.\n",
    "\n",
    "    def _strip_spaces(text):\n",
    "        ns_chars = []\n",
    "        ns_to_s_map = collections.OrderedDict()\n",
    "        for (i, c) in enumerate(text):\n",
    "            if c == ' ':\n",
    "                continue\n",
    "            ns_to_s_map[len(ns_chars)] = i\n",
    "            ns_chars.append(c)\n",
    "        ns_text = ''.join(ns_chars)\n",
    "        return (ns_text, ns_to_s_map)\n",
    "\n",
    "    # We first tokenize `orig_text`, strip whitespace from the result\n",
    "    # and `pred_text`, and check if they are the same length. If they are\n",
    "    # NOT the same length, the heuristic has failed. If they are the same\n",
    "    # length, we assume the characters are one-to-one aligned.\n",
    "    tokenizer = tokenization.BasicTokenizer(do_lower_case = do_lower_case)\n",
    "\n",
    "    tok_text = ' '.join(tokenizer.tokenize(orig_text))\n",
    "\n",
    "    start_position = tok_text.find(pred_text)\n",
    "    if start_position == -1:\n",
    "        return orig_text\n",
    "    end_position = start_position + len(pred_text) - 1\n",
    "\n",
    "    (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)\n",
    "    (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)\n",
    "\n",
    "    if len(orig_ns_text) != len(tok_ns_text):\n",
    "        return orig_text\n",
    "\n",
    "    # We then project the characters in `pred_text` back to `orig_text` using\n",
    "    # the character-to-character alignment.\n",
    "    tok_s_to_ns_map = {}\n",
    "    for (i, tok_index) in six.iteritems(tok_ns_to_s_map):\n",
    "        tok_s_to_ns_map[tok_index] = i\n",
    "\n",
    "    orig_start_position = None\n",
    "    if start_position in tok_s_to_ns_map:\n",
    "        ns_start_position = tok_s_to_ns_map[start_position]\n",
    "        if ns_start_position in orig_ns_to_s_map:\n",
    "            orig_start_position = orig_ns_to_s_map[ns_start_position]\n",
    "\n",
    "    if orig_start_position is None:\n",
    "        return orig_text\n",
    "\n",
    "    orig_end_position = None\n",
    "    if end_position in tok_s_to_ns_map:\n",
    "        ns_end_position = tok_s_to_ns_map[end_position]\n",
    "        if ns_end_position in orig_ns_to_s_map:\n",
    "            orig_end_position = orig_ns_to_s_map[ns_end_position]\n",
    "\n",
    "    if orig_end_position is None:\n",
    "        return orig_text\n",
    "\n",
    "    output_text = orig_text[orig_start_position : (orig_end_position + 1)]\n",
    "    return output_text\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 116,
   "metadata": {},
   "outputs": [],
   "source": [
    "max_seq_length = 384\n",
    "doc_stride = 128\n",
    "max_query_length = 64\n",
    "import collections\n",
    "\n",
    "def example_feature(examples, is_training = True):\n",
    "    inputs_ids, input_masks, segment_ids, start_positions, end_positions = [], [], [], [], []\n",
    "    token_to_orig_maps, token_is_max_contexts, tokenss = [], [], []\n",
    "    indices = []\n",
    "    for (example_index, example) in enumerate(examples):\n",
    "        query_tokens = tokenizer.tokenize(example.question_text)\n",
    "        \n",
    "        if len(query_tokens) > max_query_length:\n",
    "            query_tokens = query_tokens[:max_query_length]\n",
    "            \n",
    "        tok_to_orig_index = []\n",
    "        orig_to_tok_index = []\n",
    "        all_doc_tokens = []\n",
    "        for (i, token) in enumerate(example.doc_tokens):\n",
    "            orig_to_tok_index.append(len(all_doc_tokens))\n",
    "            sub_tokens = tokenizer.tokenize(token)\n",
    "            for sub_token in sub_tokens:\n",
    "                tok_to_orig_index.append(i)\n",
    "                all_doc_tokens.append(sub_token)\n",
    "        \n",
    "        tok_start_position = None\n",
    "        tok_end_position = None\n",
    "        if is_training and example.is_impossible:\n",
    "            tok_start_position = -1\n",
    "            tok_end_position = -1\n",
    "        if is_training and not example.is_impossible:\n",
    "            tok_start_position = orig_to_tok_index[example.start_position]\n",
    "            if example.end_position < len(example.doc_tokens) - 1:\n",
    "                tok_end_position = orig_to_tok_index[example.end_position + 1] - 1\n",
    "            else:\n",
    "                tok_end_position = len(all_doc_tokens) - 1\n",
    "            (tok_start_position, tok_end_position) = _improve_answer_span(\n",
    "                  all_doc_tokens, tok_start_position, tok_end_position, tokenizer,\n",
    "                  example.orig_answer_text)\n",
    "        \n",
    "        max_tokens_for_doc = max_seq_length - len(query_tokens) - 3\n",
    "        _DocSpan = collections.namedtuple(\"DocSpan\", [\"start\", \"length\"])\n",
    "        doc_spans = []\n",
    "        start_offset = 0\n",
    "        while start_offset < len(all_doc_tokens):\n",
    "            length = len(all_doc_tokens) - start_offset\n",
    "            if length > max_tokens_for_doc:\n",
    "                length = max_tokens_for_doc\n",
    "            doc_spans.append(_DocSpan(start=start_offset, length=length))\n",
    "            if start_offset + length == len(all_doc_tokens):\n",
    "                break\n",
    "            start_offset += min(length, doc_stride)\n",
    "        \n",
    "        for (doc_span_index, doc_span) in enumerate(doc_spans):\n",
    "            tokens = []\n",
    "            token_to_orig_map = {}\n",
    "            token_is_max_context = {}\n",
    "            segment_id = []\n",
    "            tokens.append('[CLS]')\n",
    "            segment_id.append(0)\n",
    "            for token in query_tokens:\n",
    "                tokens.append(token)\n",
    "                segment_id.append(0)\n",
    "            tokens.append('[SEP]')\n",
    "            segment_id.append(0)\n",
    "            \n",
    "            for i in range(doc_span.length):\n",
    "                split_token_index = doc_span.start + i\n",
    "                token_to_orig_map[len(tokens)] = tok_to_orig_index[\n",
    "                    split_token_index\n",
    "                ]\n",
    "\n",
    "                is_max_context = _check_is_max_context(\n",
    "                    doc_spans, doc_span_index, split_token_index\n",
    "                )\n",
    "                token_is_max_context[len(tokens)] = is_max_context\n",
    "                tokens.append(all_doc_tokens[split_token_index])\n",
    "                segment_id.append(1)\n",
    "            tokens.append('[SEP]')\n",
    "            segment_id.append(1)\n",
    "            \n",
    "            input_id = tokenizer.convert_tokens_to_ids(tokens)\n",
    "            input_mask = [1] * len(input_id)\n",
    "            \n",
    "            while len(input_id) < max_seq_length:\n",
    "                input_id.append(0)\n",
    "                input_mask.append(0)\n",
    "                segment_id.append(0)\n",
    "                \n",
    "            assert len(input_id) == max_seq_length\n",
    "            assert len(input_mask) == max_seq_length\n",
    "            assert len(segment_id) == max_seq_length\n",
    "            \n",
    "            start_position = None\n",
    "            end_position = None\n",
    "            if is_training and not example.is_impossible:\n",
    "                # For training, if our document chunk does not contain an annotation\n",
    "                # we throw it out, since there is nothing to predict.\n",
    "                doc_start = doc_span.start\n",
    "                doc_end = doc_span.start + doc_span.length - 1\n",
    "                out_of_span = False\n",
    "                if not (\n",
    "                    tok_start_position >= doc_start\n",
    "                    and tok_end_position <= doc_end\n",
    "                ):\n",
    "                    out_of_span = True\n",
    "                if out_of_span:\n",
    "                    start_position = 0\n",
    "                    end_position = 0\n",
    "                else:\n",
    "                    doc_offset = len(query_tokens) + 2\n",
    "                    start_position = tok_start_position - doc_start + doc_offset\n",
    "                    end_position = tok_end_position - doc_start + doc_offset\n",
    "\n",
    "            if is_training and example.is_impossible:\n",
    "                start_position = 0\n",
    "                end_position = 0\n",
    "            \n",
    "            inputs_ids.append(input_id)\n",
    "            input_masks.append(input_mask)\n",
    "            segment_ids.append(segment_id)\n",
    "            start_positions.append(start_position)\n",
    "            end_positions.append(end_position)\n",
    "            token_is_max_contexts.append(token_is_max_context)\n",
    "            token_to_orig_maps.append(token_to_orig_map)\n",
    "            tokenss.append(tokens)\n",
    "            indices.append(example_index)\n",
    "    return (inputs_ids, input_masks, segment_ids, start_positions, \n",
    "            end_positions, token_to_orig_maps, token_is_max_contexts, tokenss, indices)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 117,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_inputs_ids, train_input_masks, train_segment_ids, \\\n",
    "train_start_positions, train_end_positions, \\\n",
    "train_token_to_orig_maps, train_token_is_max_contexts, train_tokens, train_indices = example_feature(squad_train)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 118,
   "metadata": {},
   "outputs": [],
   "source": [
    "test_inputs_ids, test_input_masks, test_segment_ids, \\\n",
    "test_start_positions, test_end_positions, \\\n",
    "test_token_to_orig_maps, test_token_is_max_contexts, test_tokens, test_indices = example_feature(squad_test, False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "bert_config = modeling.BertConfig.from_json_file(BERT_CONFIG)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {},
   "outputs": [],
   "source": [
    "epoch = 4\n",
    "batch_size = 12\n",
    "warmup_proportion = 0.1\n",
    "n_best_size = 20\n",
    "num_train_steps = int(len(train_inputs_ids) / batch_size * epoch)\n",
    "num_warmup_steps = int(num_train_steps * warmup_proportion)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Model:\n",
    "    def __init__(\n",
    "        self,\n",
    "        learning_rate = 2e-5,\n",
    "    ):\n",
    "        self.X = tf.placeholder(tf.int32, [None, None])\n",
    "        self.segment_ids = tf.placeholder(tf.int32, [None, None])\n",
    "        self.input_masks = tf.placeholder(tf.int32, [None, None])\n",
    "        self.start_positions = tf.placeholder(tf.int32, [None])\n",
    "        self.end_positions = tf.placeholder(tf.int32, [None])\n",
    "        \n",
    "        model = modeling.BertModel(\n",
    "            config=bert_config,\n",
    "            is_training=True,\n",
    "            input_ids=self.X,\n",
    "            input_mask=self.input_masks,\n",
    "            token_type_ids=self.segment_ids,\n",
    "            use_one_hot_embeddings=False)\n",
    "        \n",
    "        final_hidden = model.get_sequence_output()\n",
    "        final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)\n",
    "        batch_size = final_hidden_shape[0]\n",
    "        seq_length = final_hidden_shape[1]\n",
    "        hidden_size = final_hidden_shape[2]\n",
    "        \n",
    "        output_weights = tf.get_variable(\n",
    "            \"cls/squad/output_weights\", [2, hidden_size],\n",
    "            initializer=tf.truncated_normal_initializer(stddev=0.02))\n",
    "\n",
    "        output_bias = tf.get_variable(\n",
    "              \"cls/squad/output_bias\", [2], initializer=tf.zeros_initializer())\n",
    "\n",
    "        final_hidden_matrix = tf.reshape(final_hidden,\n",
    "                                           [batch_size * seq_length, hidden_size])\n",
    "        logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)\n",
    "        logits = tf.nn.bias_add(logits, output_bias)\n",
    "\n",
    "        logits = tf.reshape(logits, [batch_size, seq_length, 2])\n",
    "        logits = tf.transpose(logits, [2, 0, 1])\n",
    "\n",
    "        unstacked_logits = tf.unstack(logits, axis=0)\n",
    "\n",
    "        (self.start_logits, self.end_logits) = (unstacked_logits[0], unstacked_logits[1])\n",
    "        print(self.start_logits, self.end_logits)\n",
    "        \n",
    "        def compute_loss(logits, positions):\n",
    "            one_hot_positions = tf.one_hot(\n",
    "                positions, depth=seq_length, dtype=tf.float32)\n",
    "            log_probs = tf.nn.log_softmax(logits, axis=-1)\n",
    "            loss = -tf.reduce_mean(\n",
    "                tf.reduce_sum(one_hot_positions * log_probs, axis=-1))\n",
    "            return loss\n",
    "        \n",
    "        start_loss = compute_loss(self.start_logits, self.start_positions)\n",
    "        end_loss = compute_loss(self.end_logits, self.end_positions)\n",
    "\n",
    "        self.cost = (start_loss + end_loss) / 2.0\n",
    "    \n",
    "        self.optimizer = optimization.create_optimizer(self.cost, learning_rate, \n",
    "                                                       num_train_steps, num_warmup_steps, False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Colocations handled automatically by placer.\n",
      "\n",
      "WARNING: The TensorFlow contrib module will not be included in TensorFlow 2.0.\n",
      "For more information, please see:\n",
      "  * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md\n",
      "  * https://github.com/tensorflow/addons\n",
      "If you depend on functionality not listed there, please file an issue.\n",
      "\n",
      "WARNING:tensorflow:From /home/jupyter/.local/lib/python3.6/site-packages/bert/modeling.py:358: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n",
      "WARNING:tensorflow:From /home/jupyter/.local/lib/python3.6/site-packages/bert/modeling.py:671: dense (from tensorflow.python.layers.core) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use keras.layers.dense instead.\n",
      "Tensor(\"unstack:0\", shape=(?, ?), dtype=float32) Tensor(\"unstack:1\", shape=(?, ?), dtype=float32)\n",
      "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/training/learning_rate_decay_v2.py:321: div (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Deprecated in favor of operator or tf.math.divide.\n",
      "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use tf.cast instead.\n",
      "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/training/saver.py:1266: checkpoint_exists (from tensorflow.python.training.checkpoint_management) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use standard file APIs to check for files with this prefix.\n",
      "INFO:tensorflow:Restoring parameters from uncased_L-12_H-768_A-12/bert_model.ckpt\n"
     ]
    }
   ],
   "source": [
    "learning_rate = 2e-5\n",
    "\n",
    "tf.reset_default_graph()\n",
    "sess = tf.InteractiveSession()\n",
    "model = Model(\n",
    "    learning_rate\n",
    ")\n",
    "\n",
    "sess.run(tf.global_variables_initializer())\n",
    "var_lists = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope = 'bert')\n",
    "saver = tf.train.Saver(var_list = var_lists)\n",
    "saver.restore(sess, BERT_INIT_CHKPNT)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train minibatch loop: 100%|██████████| 7387/7387 [46:42<00:00,  2.81it/s, cost=1.21]  \n",
      "train minibatch loop:   0%|          | 0/7387 [00:00<?, ?it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 0, training loss: 1.642079\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train minibatch loop: 100%|██████████| 7387/7387 [46:43<00:00,  2.80it/s, cost=0.477] \n",
      "train minibatch loop:   0%|          | 0/7387 [00:00<?, ?it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 1, training loss: 0.863868\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train minibatch loop: 100%|██████████| 7387/7387 [46:42<00:00,  2.81it/s, cost=0.598]  \n",
      "train minibatch loop:   0%|          | 0/7387 [00:00<?, ?it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 2, training loss: 0.631467\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train minibatch loop: 100%|██████████| 7387/7387 [46:43<00:00,  2.80it/s, cost=0.073]  "
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 3, training loss: 0.509195\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "for e in range(epoch):\n",
    "    train_loss = 0\n",
    "    pbar = tqdm(\n",
    "        range(0, len(train_inputs_ids), batch_size), desc = 'train minibatch loop'\n",
    "    )\n",
    "    for i in pbar:\n",
    "        index = min(i + batch_size, len(train_inputs_ids))\n",
    "        batch_ids = train_inputs_ids[i: index]\n",
    "        batch_masks = train_input_masks[i: index]\n",
    "        batch_segment = train_segment_ids[i: index]\n",
    "        batch_start = train_start_positions[i: index]\n",
    "        batch_end = train_end_positions[i: index]\n",
    "        cost, _ = sess.run(\n",
    "            [model.cost, model.optimizer],\n",
    "            feed_dict = {\n",
    "                model.start_positions: batch_start,\n",
    "                model.end_positions: batch_end,\n",
    "                model.X: batch_ids,\n",
    "                model.segment_ids: batch_segment,\n",
    "                model.input_masks: batch_masks\n",
    "            },\n",
    "        )\n",
    "        pbar.set_postfix(cost = cost)\n",
    "        train_loss += cost\n",
    "    train_loss /= len(train_inputs_ids) / batch_size\n",
    "    print(\n",
    "        'epoch: %d, training loss: %f\\n'\n",
    "        % (e, train_loss)\n",
    "    )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 101,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "10833"
      ]
     },
     "execution_count": 101,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(test_inputs_ids)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "batch_ids = test_inputs_ids[:10]\n",
    "batch_masks = test_input_masks[:10]\n",
    "batch_segment = test_segment_ids[:10]\n",
    "batch_start = test_start_positions[:10]\n",
    "batch_end = test_end_positions[:10]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 105,
   "metadata": {},
   "outputs": [],
   "source": [
    "p = []"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 106,
   "metadata": {},
   "outputs": [],
   "source": [
    "p.extend(start_logits.tolist())\n",
    "p.extend(start_logits.tolist())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 108,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(20, 384)"
      ]
     },
     "execution_count": 108,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "np.array(p).shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 110,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n",
      "test minibatch loop:   0%|          | 0/903 [00:00<?, ?it/s]\u001b[A\n",
      "test minibatch loop: 100%|██████████| 903/903 [02:00<00:00,  8.02it/s]\n"
     ]
    }
   ],
   "source": [
    "starts, ends = [], []\n",
    "pbar = tqdm(\n",
    "        range(0, len(test_inputs_ids), batch_size), desc = 'test minibatch loop'\n",
    "    )\n",
    "for i in pbar:\n",
    "    index = min(i + batch_size, len(test_inputs_ids))\n",
    "    batch_ids = test_inputs_ids[i: index]\n",
    "    batch_masks = test_input_masks[i: index]\n",
    "    batch_segment = test_segment_ids[i: index]\n",
    "    start_logits, end_logits = sess.run(\n",
    "                [model.start_logits, model.end_logits],\n",
    "                feed_dict = {\n",
    "                    model.X: batch_ids,\n",
    "                    model.segment_ids: batch_segment,\n",
    "                    model.input_masks: batch_masks\n",
    "                },\n",
    "            )\n",
    "    starts.extend(start_logits.tolist())\n",
    "    ends.extend(end_logits.tolist())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "\n",
    "def _get_best_indexes(logits, n_best_size):\n",
    "    index_and_score = sorted(\n",
    "        enumerate(logits), key = lambda x: x[1], reverse = True\n",
    "    )\n",
    "\n",
    "    best_indexes = []\n",
    "    for i in range(len(index_and_score)):\n",
    "        if i >= n_best_size:\n",
    "            break\n",
    "        best_indexes.append(index_and_score[i][0])\n",
    "    return best_indexes\n",
    "\n",
    "def _compute_softmax(scores):\n",
    "    \"\"\"Compute softmax probability over raw logits.\"\"\"\n",
    "    if not scores:\n",
    "        return []\n",
    "\n",
    "    max_score = None\n",
    "    for score in scores:\n",
    "        if max_score is None or score > max_score:\n",
    "            max_score = score\n",
    "\n",
    "    exp_scores = []\n",
    "    total_sum = 0.0\n",
    "    for score in scores:\n",
    "        x = math.exp(score - max_score)\n",
    "        exp_scores.append(x)\n",
    "        total_sum += x\n",
    "\n",
    "    probs = []\n",
    "    for score in exp_scores:\n",
    "        probs.append(score / total_sum)\n",
    "    return probs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 177,
   "metadata": {},
   "outputs": [],
   "source": [
    "def to_predict(\n",
    "    indices,\n",
    "    examples,\n",
    "    start_logits,\n",
    "    end_logits,\n",
    "    tokens,\n",
    "    token_to_orig_maps,\n",
    "    token_is_max_contexts,\n",
    "    max_answer_length = 30,\n",
    "    n_best_size = 20,\n",
    "    do_lower_case = False,\n",
    "    null_score_diff_threshold = 0.0,\n",
    "    output_prediction_file = 'predictions.json',\n",
    "    output_nbest_file = 'nbest_predictions.json',\n",
    "    output_null_log_odds_file = 'null_odds.json',\n",
    "):\n",
    "\n",
    "    example_index_to_features = collections.defaultdict(list)\n",
    "    for no, feature in enumerate(indices):\n",
    "        example_index_to_features[feature].append(no)\n",
    "\n",
    "    all_predictions = collections.OrderedDict()\n",
    "    all_nbest_json = collections.OrderedDict()\n",
    "    scores_diff_json = collections.OrderedDict()\n",
    "\n",
    "    _PrelimPrediction = collections.namedtuple(\n",
    "        'PrelimPrediction',\n",
    "        [\n",
    "            'feature_index',\n",
    "            'start_index',\n",
    "            'end_index',\n",
    "            'start_logit',\n",
    "            'end_logit',\n",
    "        ],\n",
    "    )\n",
    "\n",
    "    for (example_index, example) in enumerate(examples):\n",
    "        features = example_index_to_features[example_index]\n",
    "        prelim_predictions = []\n",
    "        score_null = 1000000\n",
    "        min_null_feature_index = 0\n",
    "        null_start_logit = 0\n",
    "        null_end_logit = 0\n",
    "        for (feature_index, i) in enumerate(features):\n",
    "\n",
    "            start_indexes = _get_best_indexes(start_logits[i], n_best_size)\n",
    "            end_indexes = _get_best_indexes(end_logits[i], n_best_size)\n",
    "            if version_2_with_negative:\n",
    "                feature_null_score = start_logits[i][0] + end_logits[i][0]\n",
    "                if feature_null_score < score_null:\n",
    "                    score_null = feature_null_score\n",
    "                    min_null_feature_index = feature_index\n",
    "                    null_start_logit = start_logits[i][0]\n",
    "                    null_end_logit = end_logits[i][0]\n",
    "            for start_index in start_indexes:\n",
    "                for end_index in end_indexes:\n",
    "                    if start_index >= len(tokens[i]):\n",
    "                        continue\n",
    "                    if end_index >= len(tokens[i]):\n",
    "                        continue\n",
    "                    if start_index not in token_to_orig_maps[i]:\n",
    "                        continue\n",
    "                    if end_index not in token_to_orig_maps[i]:\n",
    "                        continue\n",
    "                    if not token_is_max_contexts[i].get(start_index, False):\n",
    "                        continue\n",
    "                    if end_index < start_index:\n",
    "                        continue\n",
    "                    length = end_index - start_index + 1\n",
    "                    if length > max_answer_length:\n",
    "                        continue\n",
    "                    prelim_predictions.append(\n",
    "                        _PrelimPrediction(\n",
    "                            feature_index = i,\n",
    "                            start_index = start_index,\n",
    "                            end_index = end_index,\n",
    "                            start_logit = start_logits[i][start_index],\n",
    "                            end_logit = end_logits[i][end_index],\n",
    "                        )\n",
    "                    )\n",
    "        if version_2_with_negative:\n",
    "            prelim_predictions.append(\n",
    "                _PrelimPrediction(\n",
    "                    feature_index = min_null_feature_index,\n",
    "                    start_index = 0,\n",
    "                    end_index = 0,\n",
    "                    start_logit = null_start_logit,\n",
    "                    end_logit = null_end_logit,\n",
    "                )\n",
    "            )\n",
    "\n",
    "        prelim_predictions = sorted(\n",
    "            prelim_predictions,\n",
    "            key = lambda x: (x.start_logit + x.end_logit),\n",
    "            reverse = True,\n",
    "        )\n",
    "\n",
    "        _NbestPrediction = collections.namedtuple(\n",
    "            'NbestPrediction', ['text', 'start_logit', 'end_logit']\n",
    "        )\n",
    "\n",
    "        seen_predictions = {}\n",
    "        nbest = []\n",
    "        for pred in prelim_predictions:\n",
    "            if len(nbest) >= n_best_size:\n",
    "                break\n",
    "            i = pred.feature_index\n",
    "            if pred.start_index > 0:\n",
    "                tok_tokens = tokens[i][pred.start_index : (pred.end_index + 1)]\n",
    "                orig_doc_start = token_to_orig_maps[i][pred.start_index]\n",
    "                orig_doc_end = token_to_orig_maps[i][pred.end_index]\n",
    "                orig_tokens = example.doc_tokens[\n",
    "                    orig_doc_start : (orig_doc_end + 1)\n",
    "                ]\n",
    "                tok_text = ' '.join(tok_tokens)\n",
    "                tok_text = tok_text.replace(' ##', '')\n",
    "                tok_text = tok_text.replace('##', '')\n",
    "                tok_text = tok_text.strip()\n",
    "                tok_text = ' '.join(tok_text.split())\n",
    "                orig_text = ' '.join(orig_tokens)\n",
    "\n",
    "                final_text = get_final_text(tok_text, orig_text, do_lower_case)\n",
    "                if final_text in seen_predictions:\n",
    "                    continue\n",
    "                seen_predictions[final_text] = True\n",
    "            else:\n",
    "                final_text = ''\n",
    "                seen_predictions[final_text] = True\n",
    "            nbest.append(\n",
    "                _NbestPrediction(\n",
    "                    text = final_text,\n",
    "                    start_logit = pred.start_logit,\n",
    "                    end_logit = pred.end_logit,\n",
    "                )\n",
    "            )\n",
    "        if version_2_with_negative:\n",
    "            if '' not in seen_predictions:\n",
    "                nbest.append(\n",
    "                    _NbestPrediction(\n",
    "                        text = '',\n",
    "                        start_logit = null_start_logit,\n",
    "                        end_logit = null_end_logit,\n",
    "                    )\n",
    "                )\n",
    "\n",
    "        if not nbest:\n",
    "            nbest.append(\n",
    "                _NbestPrediction(\n",
    "                    text = 'empty', start_logit = 0.0, end_logit = 0.0\n",
    "                )\n",
    "            )\n",
    "\n",
    "        assert len(nbest) >= 1\n",
    "        total_scores = []\n",
    "        best_non_null_entry = None\n",
    "        for entry in nbest:\n",
    "            total_scores.append(entry.start_logit + entry.end_logit)\n",
    "            if not best_non_null_entry:\n",
    "                if entry.text:\n",
    "                    best_non_null_entry = entry\n",
    "\n",
    "        probs = _compute_softmax(total_scores)\n",
    "\n",
    "        nbest_json = []\n",
    "        for (i, entry) in enumerate(nbest):\n",
    "            output = collections.OrderedDict()\n",
    "            output['text'] = entry.text\n",
    "            output['probability'] = probs[i]\n",
    "            output['start_logit'] = entry.start_logit\n",
    "            output['end_logit'] = entry.end_logit\n",
    "            nbest_json.append(output)\n",
    "\n",
    "        assert len(nbest_json) >= 1\n",
    "        if not version_2_with_negative:\n",
    "            all_predictions[example.qas_id] = nbest_json[0]['text']\n",
    "        else:\n",
    "            score_diff = (\n",
    "                score_null\n",
    "                - best_non_null_entry.start_logit\n",
    "                - (best_non_null_entry.end_logit)\n",
    "            )\n",
    "            scores_diff_json[example.qas_id] = score_diff\n",
    "            if score_diff > null_score_diff_threshold:\n",
    "                all_predictions[example.qas_id] = ''\n",
    "            else:\n",
    "                all_predictions[example.qas_id] = best_non_null_entry.text\n",
    "        all_nbest_json[example.qas_id] = nbest_json\n",
    "\n",
    "    with tf.gfile.GFile(output_prediction_file, 'w') as writer:\n",
    "        writer.write(json.dumps(all_predictions, indent = 4) + '\\n')\n",
    "    with tf.gfile.GFile(output_nbest_file, 'w') as writer:\n",
    "        writer.write(json.dumps(all_nbest_json, indent = 4) + '\\n')\n",
    "    if version_2_with_negative:\n",
    "        with tf.gfile.GFile(output_null_log_odds_file, 'w') as writer:\n",
    "            writer.write(json.dumps(scores_diff_json, indent = 4) + '\\n')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 178,
   "metadata": {},
   "outputs": [],
   "source": [
    "to_predict(test_indices, squad_test, starts, ends,\n",
    "          test_tokens, test_token_to_orig_maps, test_token_is_max_contexts)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 184,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "--2019-07-02 17:46:28--  https://raw.githubusercontent.com/allenai/bi-att-flow/master/squad/evaluate-v1.1.py\n",
      "Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 151.101.8.133\n",
      "Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|151.101.8.133|:443... connected.\n",
      "HTTP request sent, awaiting response... 200 OK\n",
      "Length: 3419 (3.3K) [text/plain]\n",
      "Saving to: ‘evaluate-v1.1.py’\n",
      "\n",
      "evaluate-v1.1.py    100%[===================>]   3.34K  --.-KB/s    in 0s      \n",
      "\n",
      "2019-07-02 17:46:29 (99.2 MB/s) - ‘evaluate-v1.1.py’ saved [3419/3419]\n",
      "\n"
     ]
    }
   ],
   "source": [
    "!wget https://raw.githubusercontent.com/allenai/bi-att-flow/master/squad/evaluate-v1.1.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 185,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{\"exact_match\": 77.57805108798486, \"f1\": 86.18327335287402}\r\n"
     ]
    }
   ],
   "source": [
    "!python3 evaluate-v1.1.py dev-v1.1.json predictions.json"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
