{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": true,
    "pycharm": {
     "name": "#%% md\n"
    }
   },
   "source": [
    "### 如何利用转换好的NQ json格式的数据"
   ]
  },
  {
   "cell_type": "markdown",
   "source": [
    "### 依赖包导入"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "import json\n",
    "import logging\n",
    "import os\n",
    "import collections\n",
    "import pickle\n",
    "import pandas as pd\n",
    "from tqdm import tqdm\n",
    "import numpy as np\n",
    "\n",
    "from transformers.tokenization_bert import whitespace_tokenize\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "### 日志"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "logger = logging.getLogger(__name__)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "### 命名元组"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "code",
   "source": [
    "NQExample = collections.namedtuple(\"NQExample\", [\n",
    "    \"qas_id\", \"question_text\", \"doc_tokens\", \"orig_answer_text\",\n",
    "    \"start_position\", \"end_position\", \"long_position\",\n",
    "    \"short_is_impossible\", \"long_is_impossible\", \"crop_start\"])\n",
    "\n",
    "Crop = collections.namedtuple(\"Crop\", [\"unique_id\", \"example_index\", \"doc_span_index\",\n",
    "    \"tokens\", \"token_to_orig_map\", \"token_is_max_context\",\n",
    "    \"input_ids\", \"attention_mask\", \"token_type_ids\",\n",
    "    # \"p_mask\",\n",
    "    \"paragraph_len\", \"start_position\", \"end_position\", \"long_position\",\n",
    "    \"short_is_impossible\", \"long_is_impossible\"])\n",
    "\n",
    "LongAnswerCandidate = collections.namedtuple('LongAnswerCandidate', [\n",
    "    'start_token', 'end_token', 'top_level'])\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% \n"
    }
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "### 命名元组说明\n",
    "\n",
    "- NQExample构成\n",
    "\n",
    "      qas_id(问题的id)\n",
    "      question_text(问题的文本)\n",
    "      doc_tokens(阅读材料)\n",
    "      orig_answer_text(原始回答的文本)\n",
    "      start_position(文本中的开始位置)\n",
    "      end_position(文本中的结束位置)\n",
    "      long_position\n",
    "      short_is_impossible(不可短回答)\n",
    "      long_is_impossible(不可长回答)\n",
    "      crop_start\n",
    "      \n",
    "- Crop构成(由源SQuAD2.0格式数据转换的特征结构)\n",
    "\n",
    "      unique_id(特征唯一id)\n",
    "      example_index(样本索引)\n",
    "      doc_span_index(特征在doc_span中的索引)\n",
    "      tokens(样本的token序列)\n",
    "      token_to_orig_map(tokens里面每一个token在原始doc_token的索引)\n",
    "      token_is_max_context(序列，里面的值表示该位置的token在当前doc_span里面是否是上下文最全的)\n",
    "      input_ids(tokens转化为token ids作为模型的输入)\n",
    "      attention_mask(mask)\n",
    "      token_type_ids(加入的token类型指示)\n",
    "      paragraph_len(doc_span的长度)\n",
    "      start_position(答案在当前tokens序列里面的起始位置)\n",
    "      end_position(答案在当前tokens序列里面的结束位置)\n",
    "      long_position\n",
    "      short_is_impossible(不可短回答)\n",
    "      long_is_impossible(不可长回答)\n",
    "      \n",
    "- LongAnswerCandidate构成\n",
    "\n",
    "      start_token(起始token的位置)\n",
    "      end_token(结束token的位置)\n",
    "      top_level(顶级数据标识)\n",
    "      \n",
    "      "
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "### 一些变量定义"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "UNMAPPED = -123\n",
    "CLS_INDEX = 0"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "### 处理函数说明"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "#### get_add_tokens()函数"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "def get_add_tokens(do_enumerate):\n",
    "    tags = ['Dd', 'Dl', 'Dt', 'H1', 'H2', 'H3', 'Li', 'Ol', 'P', 'Table', 'Td', 'Th', 'Tr', 'Ul']\n",
    "    opening_tags = [f'<{tag}>' for tag in tags]\n",
    "    closing_tags = [f'</{tag}>' for tag in tags]\n",
    "    added_tags = opening_tags + closing_tags\n",
    "    # See `nq_to_sqaud.py` for special-tokens\n",
    "    special_tokens = ['<P>', '<Table>']\n",
    "    if do_enumerate:\n",
    "        for special_token in special_tokens:\n",
    "            for j in range(11):\n",
    "              added_tags.append(f'<{special_token[1: -1]}{j}>')\n",
    "\n",
    "    add_tokens = ['Td_colspan', 'Th_colspan', '``', '\\'\\'', '--']\n",
    "    add_tokens = add_tokens + added_tags\n",
    "    return add_tokens"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
     "#### get_add_tokens()函数\n",
    "    输入: do_enumerate(boolean)--要处理的数据是否已经过文段位置的特殊标记处理\n",
    "    函数流程: 把开闭标签列表合并，如果do_enumerate为真，即已经为文段进行了位置标记处理，\n",
     "    则把两个特殊标记的带编号变体(如<P0>...<P10>、<Table0>...<Table10>)也加到列表末尾，再在列表前加入一些必须的token，\n",
    "    最终形成要返回的add_tokens\n",
    "    返回: add_tokens(list)--包含很多html标签(开闭都有)\n",
    "    作用: 返回值作为tokenizer.add_tokens()的输入，为分词添加一些额外的token\n",
    "    "
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "#### find_closing_tag()函数"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "def find_closing_tag(tokens, opening_tag):\n",
    "    closing_tag = f'</{opening_tag[1: -1]}>'\n",
    "    index, stack = -1, []\n",
    "    for token_index, token in enumerate(tokens):\n",
    "        if token == opening_tag:\n",
    "            stack.insert(0, opening_tag)\n",
    "        elif token == closing_tag:\n",
    "            stack.pop()\n",
    "\n",
    "        if len(stack) == 0:\n",
    "            index = token_index\n",
    "            break\n",
    "    return index"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "#### find_closing_tag()函数\n",
    "    输入: tokens和opening_tags(开标签)\n",
    "    函数流程: 根据当前输入的opening_tag构造closing_tag，对输入的tokens进行枚举，\n",
    "    将碰到的相同的opening_tag入栈，碰到的对应的closing_tag出栈，若最终栈中无元素，\n",
    "    说明此时匹配到了当前位置opening_tag对应的closing_tag，返回此时的closing_tag\n",
    "    的下标。\n",
    "    输出: 当前opening_tag对应到的closing的token下标\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "#### read_candidates()函数"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "def read_candidates(candidate_files, do_cache=True):\n",
    "    assert isinstance(candidate_files, (tuple, list)), candidate_files\n",
    "    for fn in candidate_files:\n",
    "        assert os.path.exists(fn), f'Missing file {fn}'\n",
    "    cache_fn = 'candidates.pkl'\n",
    "\n",
    "    candidates = {}\n",
    "    if not os.path.exists(cache_fn):\n",
    "        for fn in candidate_files:\n",
    "            with open(fn) as f:\n",
    "                for line in tqdm(f):\n",
    "                    entry = json.loads(line)\n",
    "                    example_id = str(entry['example_id'])\n",
    "                    cnds = entry.pop('long_answer_candidates')\n",
    "                    cnds = [LongAnswerCandidate(c['start_token'], c['end_token'],\n",
    "                            c['top_level']) for c in cnds]\n",
    "                    candidates[example_id] = cnds\n",
    "\n",
    "        if do_cache:\n",
    "            with open(cache_fn, 'wb') as f:\n",
    "                pickle.dump(candidates, f)\n",
    "    else:\n",
    "        print(f'Loading from cache: {cache_fn}')\n",
    "        with open(cache_fn, 'rb') as f:\n",
    "            candidates = pickle.load(f)\n",
    "\n",
    "    return candidates\n",
    "\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "#### read_candidates()函数\n",
    "    输入: candidate_files(list(str))(源文件(训练集+测试集)路径)\n",
    "    流程: 先检查是否有缓存文件candidates.pkl，如果该缓存文件不存在，\n",
    "    则从源文件中首次读取出全部example下的long_answer_candidates字段\n",
    "    的数据，以LongAnswerCandidate命名元组格式进行存储，并与所属的\n",
    "    example_id构成键值对放入候选答案的字典中。首次从源文件中读取数据时，\n",
    "    需要生成缓存文件。方便下次直接从缓存文件中载入数据。\n",
    "    返回: 候选长回答的字典\n",
    "    作用: 方便快速搜寻每个example的候选长回答"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "#### is_whitespace()函数"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "def is_whitespace(c):\n",
    "    if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\n",
    "        return True\n",
    "    return False"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "#### is_whitespace()函数\n",
    "    输入: 字符c\n",
    "    流程: 判断当前字符是否是空格符\" \"，缩进符\"\\t\"，光标回退符\"\\r\",\n",
     "    换行符\"\\n\"，或者其Unicode码点为0x202F(窄型不换行空格，NARROW NO-BREAK SPACE)\n",
    "    输出: boolean值\n",
    "    作用: 在读取NQ例子时，用作doc字符是否是空格的判断\n",
    "    "
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "#### read_nq_examples()函数"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
     "def read_nq_examples(input_file_or_data, is_training):\n",
     "    \"\"\"Read a NQ json file into a list of NQExample. Refer to `nq_to_squad.py`\n",
     "       to convert the `simplified-nq-t*.jsonl` files to NQ json.\n",
     "\n",
     "       Args:\n",
     "           input_file_or_data: path to a NQ json file, or the already loaded\n",
     "               content of its top-level \"data\" field.\n",
     "           is_training: when True, answer positions are resolved and\n",
     "               examples whose answer text cannot be recovered are skipped.\n",
     "\n",
     "       Yields:\n",
     "           NQExample, one per (paragraph, question) pair.\"\"\"\n",
     "    if isinstance(input_file_or_data, str):\n",
     "        with open(input_file_or_data, \"r\", encoding='utf-8') as f:\n",
     "            input_data = json.load(f)[\"data\"]\n",
     "\n",
     "    else:\n",
     "        input_data = input_file_or_data\n",
     "\n",
     "    for entry_index, entry in enumerate(tqdm(input_data, total=len(input_data))):\n",
     "        # if entry_index >= 2:\n",
     "        #     break\n",
     "        # The converter emits exactly one paragraph (and one QA) per entry.\n",
     "        assert len(entry[\"paragraphs\"]) == 1\n",
     "        paragraph = entry[\"paragraphs\"][0]\n",
     "        paragraph_text = paragraph[\"context\"]\n",
     "        # Whitespace-split the context into words. char_to_word_offset[i]\n",
     "        # maps character i of the context to the index of the word holding it.\n",
     "        doc_tokens = []\n",
     "        char_to_word_offset = []\n",
     "        prev_is_whitespace = True\n",
     "        for c in paragraph_text:\n",
     "            if is_whitespace(c):\n",
     "                prev_is_whitespace = True\n",
     "            else:\n",
     "                if prev_is_whitespace:\n",
     "                    doc_tokens.append(c)\n",
     "                else:\n",
     "                    doc_tokens[-1] += c\n",
     "                prev_is_whitespace = False\n",
     "            char_to_word_offset.append(len(doc_tokens) - 1)\n",
     "\n",
     "        assert len(paragraph[\"qas\"]) == 1\n",
     "        qa = paragraph[\"qas\"][0]\n",
     "        start_position = None\n",
     "        end_position = None\n",
     "        long_position = None\n",
     "        orig_answer_text = None\n",
     "        short_is_impossible = False\n",
     "        long_is_impossible = False\n",
     "        if is_training:\n",
     "            short_is_impossible = qa[\"short_is_impossible\"]\n",
     "            short_answers = qa[\"short_answers\"]\n",
     "            if len(short_answers) >= 2:\n",
     "                # logger.info(f\"Choosing leftmost of \"\n",
     "                #     f\"{len(short_answers)} short answer\")\n",
     "                # Keep only the leftmost short answer when several are given.\n",
     "                short_answers = sorted(short_answers, key=lambda sa: sa[\"answer_start\"])\n",
     "                short_answers = short_answers[0: 1]\n",
     "\n",
     "            if not short_is_impossible:\n",
     "                answer = short_answers[0]\n",
     "                orig_answer_text = answer[\"text\"]\n",
     "                answer_offset = answer[\"answer_start\"]\n",
     "                answer_length = len(orig_answer_text)\n",
     "                # Convert the character span into a word span.\n",
     "                start_position = char_to_word_offset[answer_offset]\n",
     "                end_position = char_to_word_offset[\n",
     "                    answer_offset + answer_length - 1]\n",
     "                # Only add answers where the text can be exactly\n",
     "                # recovered from the document. If this CAN'T\n",
     "                # happen it's likely due to weird Unicode stuff\n",
     "                # so we will just skip the example.\n",
     "                #\n",
     "                # Note that this means for training mode, every\n",
     "                # example is NOT guaranteed to be preserved.\n",
     "                actual_text = \" \".join(doc_tokens[start_position:\n",
     "                    end_position + 1])\n",
     "                cleaned_answer_text = \" \".join(\n",
     "                    whitespace_tokenize(orig_answer_text))\n",
     "                if actual_text.find(cleaned_answer_text) == -1:\n",
     "                    logger.warning(\n",
     "                        \"Could not find answer: '%s' vs. '%s'\",\n",
     "                        actual_text, cleaned_answer_text)\n",
     "                    continue\n",
     "            else:\n",
     "                start_position = -1\n",
     "                end_position = -1\n",
     "                orig_answer_text = \"\"\n",
     "\n",
     "            long_is_impossible = qa[\"long_is_impossible\"]\n",
     "            long_answers = qa[\"long_answers\"]\n",
     "            if (len(long_answers) != 1) and not long_is_impossible:\n",
     "                raise ValueError(f\"For training, each question\"\n",
     "                    f\" should have exactly 1 long answer.\")\n",
     "\n",
     "            if not long_is_impossible:\n",
     "                long_answer = long_answers[0]\n",
     "                long_answer_offset = long_answer[\"answer_start\"]\n",
     "                long_position = char_to_word_offset[long_answer_offset]\n",
     "            else:\n",
     "                long_position = -1\n",
     "\n",
     "            # print(f'Q:{question_text}')\n",
     "            # print(f'A:{start_position}, {end_position},\n",
     "            # {orig_answer_text}')\n",
     "            # print(f'R:{doc_tokens[start_position: end_position]}')\n",
     "\n",
     "            # A long answer span must start at or before its short answer.\n",
     "            if not short_is_impossible and not long_is_impossible:\n",
     "                assert long_position <= start_position\n",
     "\n",
     "            # A short answer without an enclosing long answer is bad data.\n",
     "            if not short_is_impossible and long_is_impossible:\n",
     "                assert False, f'Invalid pair short, long pair'\n",
     "\n",
     "        example = NQExample(\n",
     "            qas_id=qa[\"id\"],\n",
     "            question_text=qa[\"question\"],\n",
     "            doc_tokens=doc_tokens,\n",
     "            orig_answer_text=orig_answer_text,\n",
     "            start_position=start_position,\n",
     "            end_position=end_position,\n",
     "            long_position=long_position,\n",
     "            short_is_impossible=short_is_impossible,\n",
     "            long_is_impossible=long_is_impossible,\n",
     "            crop_start=qa[\"crop_start\"])\n",
     "\n",
     "        yield example\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "#### read_nq_examples()函数\n",
    "    输入: NQ json格式的文件(str)(由原数据集经nq2squad.py转换而来) + 是否用于训练的标识is_training(boolean)\n",
    "    流程: 打开转换得到的json文件，获取到其中的顶层数据data，然后逐一获取每个转换而来的paragraph下的context字段\n",
    "    对context的文本逐字节进行分隔符(空格缩进等等)筛查，相当于对context按分隔符进行分割，doc_token中存放的是\n",
    "    所有词组成的列表。同时计算context文本中由char到word的偏移量(即记录不同字符对应的不同的词)。\n",
    "    接下来，获取paragraph下的qas字段:\n",
     "    开始处理答案部分，若is_training为真且问题可回答，则获取短答案，若短答案有两个及以上，则按\"answer_start\"进行排序，\n",
     "    取排序后的第一个答案，根据前面计算得到的char_to_word偏移计算答案在context中实际的word的开始和结束位置，\n",
    "    把根据开始结束位置截取到的实际的文本转换成字符串的形式。对于长答案，作相似的处理，只是对于训练，每个问题只能有\n",
    "    一个长答案。\n",
    "    输出: 生成example数据(NQExample命名元组的形式)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "#### 文本扩展"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "DocSpan = collections.namedtuple(\"DocSpan\", [\"start\", \"length\"])"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "#### get_spans()函数"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "def get_spans(doc_stride, max_tokens_for_doc, max_len):\n",
    "    doc_spans = []\n",
    "    start_offset = 0\n",
    "    while start_offset < max_len:\n",
    "        length = max_len - start_offset\n",
    "        if length > max_tokens_for_doc:\n",
    "            length = max_tokens_for_doc\n",
    "        doc_spans.append(DocSpan(start=start_offset, length=length))\n",
    "        if start_offset + length == max_len:\n",
    "            break\n",
    "        start_offset += min(length, doc_stride)\n",
    "    return doc_spans"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "#### get_spans()函数\n",
    "    输入: 滑动窗口的步数doc_stride + 采用不同模型限定的文档最大长度 +\n",
    "    max_len\n",
     "    过程: 从位置0开始用滑动窗口截取文档: 每个span最长为max_tokens_for_doc个token，\n",
     "    窗口每次向前移动doc_stride个token(不足时移动剩余长度)，直到覆盖到max_len(文档末尾)为止\n",
    "    输出: 一段doc文本经滑动窗口得到的多个span\n",
    "    e.g\n",
    "    Doc: the man went to the store and bought a gallon of milk\n",
    "    Span A: the man went to the\n",
    "    Span B: to the store and bought\n",
    "    Span C: and bought a gallon of\n",
    "    作用: 解决doc长于预设最大序列长度的问题"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "#### convert_examples_to_crops()函数"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "def convert_examples_to_crops(examples_gen, tokenizer, max_seq_length,\n",
    "                              doc_stride, max_query_length, is_training,\n",
    "                              cls_token='[CLS]', sep_token='[SEP]', pad_id=0,\n",
    "                              sequence_a_segment_id=0,\n",
    "                              sequence_b_segment_id=1,\n",
    "                              cls_token_segment_id=0,\n",
    "                              pad_token_segment_id=0,\n",
    "                              mask_padding_with_zero=True,\n",
    "                              p_keep_impossible=None,\n",
    "                              sep_token_extra=False):\n",
    "    \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n",
    "    assert p_keep_impossible is not None, '`p_keep_impossible` is required'\n",
    "    unique_id = 1000000000\n",
    "    num_short_pos, num_short_neg = 0, 0\n",
    "    num_long_pos, num_long_neg = 0, 0\n",
    "    sub_token_cache = {}\n",
    "    # max_N, max_M = 1024, 1024\n",
    "    # f = np.zeros((max_N, max_M), dtype=np.float32)\n",
    "\n",
    "    crops = []\n",
    "    for example_index, example in enumerate(examples_gen):\n",
    "        if example_index % 1000 == 0 and example_index > 0:\n",
    "            logger.info('Converting %s: short_pos %s short_neg %s'\n",
    "                ' long_pos %s long_neg %s',\n",
    "                example_index, num_short_pos, num_short_neg,\n",
    "                num_long_pos, num_long_neg)\n",
    "\n",
    "        query_tokens = tokenizer.tokenize(example.question_text)\n",
    "        if len(query_tokens) > max_query_length:\n",
    "            query_tokens = query_tokens[0:max_query_length]\n",
    "\n",
    "        # this takes the longest!\n",
    "        tok_to_orig_index = []\n",
    "        orig_to_tok_index = []\n",
    "        all_doc_tokens = []\n",
    "\n",
    "        for i, token in enumerate(example.doc_tokens):\n",
    "            orig_to_tok_index.append(len(all_doc_tokens))\n",
    "            sub_tokens = sub_token_cache.get(token)\n",
    "            if sub_tokens is None:\n",
    "                sub_tokens = tokenizer.tokenize(token)\n",
    "                sub_token_cache[token] = sub_tokens\n",
    "            tok_to_orig_index.extend([i for _ in range(len(sub_tokens))])\n",
    "            all_doc_tokens.extend(sub_tokens)\n",
    "\n",
    "        tok_start_position = None\n",
    "        tok_end_position = None\n",
    "        if is_training and example.short_is_impossible:\n",
    "            tok_start_position = -1\n",
    "            tok_end_position = -1\n",
    "\n",
    "        if is_training and not example.short_is_impossible:\n",
    "            tok_start_position = orig_to_tok_index[example.start_position]\n",
    "            if example.end_position < len(example.doc_tokens) - 1:\n",
    "                tok_end_position = orig_to_tok_index[\n",
    "                    example.end_position + 1] - 1\n",
    "            else:\n",
    "                tok_end_position = len(all_doc_tokens) - 1\n",
    "\n",
    "        tok_long_position = None\n",
    "        if is_training and example.long_is_impossible:\n",
    "            tok_long_position = -1\n",
    "\n",
    "        if is_training and not example.long_is_impossible:\n",
    "            tok_long_position = orig_to_tok_index[example.long_position]\n",
    "\n",
    "        # For Bert: [CLS] question [SEP] paragraph [SEP]\n",
    "        special_tokens_count = 3\n",
    "        if sep_token_extra:\n",
    "            # For Roberta: <s> question </s> </s> paragraph </s>\n",
    "            special_tokens_count += 1\n",
    "        max_tokens_for_doc = max_seq_length - len(query_tokens) - special_tokens_count\n",
    "        assert max_tokens_for_doc > 0\n",
    "        # We can have documents that are longer than the maximum\n",
    "        # sequence length. To deal with this we do a sliding window\n",
    "        # approach, where we take chunks of the up to our max length\n",
    "        # with a stride of `doc_stride`.\n",
    "        doc_spans = get_spans(doc_stride, max_tokens_for_doc, len(all_doc_tokens))\n",
    "        for doc_span_index, doc_span in enumerate(doc_spans):\n",
    "            # Tokens are constructed as: CLS Query SEP Paragraph SEP\n",
    "            tokens = []\n",
    "            token_to_orig_map = UNMAPPED * np.ones((max_seq_length, ), dtype=np.int32)\n",
    "            token_is_max_context = np.zeros((max_seq_length, ), dtype=np.bool)\n",
    "            token_type_ids = []\n",
    "\n",
    "            # p_mask: mask with 1 for token than cannot be in the\n",
    "            # answer (0 for token which can be in an answer)\n",
    "            # Original TF implem also keep the classification token\n",
    "            # (set to 0) (not sure why...)\n",
    "            # p_mask = []\n",
    "\n",
    "            short_is_impossible = example.short_is_impossible\n",
    "            start_position = None\n",
    "            end_position = None\n",
    "            special_tokens_offset = special_tokens_count - 1\n",
    "            doc_offset = len(query_tokens) + special_tokens_offset\n",
    "            if is_training and not short_is_impossible:\n",
    "                doc_start = doc_span.start\n",
    "                doc_end = doc_span.start + doc_span.length - 1\n",
    "                if not (tok_start_position >= doc_start and tok_end_position <= doc_end):\n",
    "                    start_position = 0\n",
    "                    end_position = 0\n",
    "                    short_is_impossible = True\n",
    "                else:\n",
    "                    start_position = tok_start_position - doc_start + doc_offset\n",
    "                    end_position = tok_end_position - doc_start + doc_offset\n",
    "\n",
    "            long_is_impossible = example.long_is_impossible\n",
    "            long_position = None\n",
    "            if is_training and not long_is_impossible:\n",
    "                doc_start = doc_span.start\n",
    "                doc_end = doc_span.start + doc_span.length - 1\n",
    "                # out of span\n",
    "                if not (tok_long_position >= doc_start and tok_long_position <= doc_end):\n",
    "                    long_position = 0\n",
    "                    long_is_impossible = True\n",
    "                else:\n",
    "                    long_position = tok_long_position - doc_start + doc_offset\n",
    "\n",
    "            # drop impossible samples\n",
    "            if long_is_impossible:\n",
    "                if np.random.rand() > p_keep_impossible:\n",
    "                    continue\n",
    "\n",
    "            # CLS token at the beginning\n",
    "            tokens.append(cls_token)\n",
    "            token_type_ids.append(cls_token_segment_id)\n",
    "            # p_mask.append(0)  # can be answer\n",
    "\n",
    "            # Query\n",
    "            tokens += query_tokens\n",
    "            token_type_ids += [sequence_a_segment_id] * len(query_tokens)\n",
    "            # p_mask += [1] * len(query_tokens)  # can not be answer\n",
    "\n",
    "            # SEP token\n",
    "            tokens.append(sep_token)\n",
    "            token_type_ids.append(sequence_a_segment_id)\n",
    "            # p_mask.append(1)  # can not be answer\n",
    "            if sep_token_extra:\n",
    "                tokens.append(sep_token)\n",
    "                token_type_ids.append(sequence_a_segment_id)\n",
    "                # p_mask.append(1)\n",
    "\n",
    "            # Paragraph\n",
    "            for i in range(doc_span.length):\n",
    "                split_token_index = doc_span.start + i\n",
    "                # We add `example.crop_start` as the original document\n",
    "                # is already shifted\n",
    "                token_to_orig_map[len(tokens)] = tok_to_orig_index[\n",
    "                    split_token_index] + example.crop_start\n",
    "\n",
    "                token_is_max_context[len(tokens)] = check_is_max_context(doc_spans,\n",
    "                    doc_span_index, split_token_index)\n",
    "                tokens.append(all_doc_tokens[split_token_index])\n",
    "                token_type_ids.append(sequence_b_segment_id)\n",
    "                # p_mask.append(0)  # can be answer\n",
    "\n",
    "            paragraph_len = doc_span.length\n",
    "\n",
    "            # SEP token\n",
    "            tokens.append(sep_token)\n",
    "            token_type_ids.append(sequence_b_segment_id)\n",
    "            # p_mask.append(1)  # can not be answer\n",
    "\n",
    "            input_ids = tokenizer.convert_tokens_to_ids(tokens)\n",
    "\n",
    "            # The mask has 1 for real tokens and 0 for padding tokens. Only real\n",
    "            # tokens are attended to.\n",
    "            attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n",
    "\n",
    "            # Zero-pad up to the sequence length.\n",
    "            while len(input_ids) < max_seq_length:\n",
    "                input_ids.append(pad_id)\n",
    "                attention_mask.append(0 if mask_padding_with_zero else 1)\n",
    "                token_type_ids.append(pad_token_segment_id)\n",
    "                # p_mask.append(1)  # can not be answer\n",
    "\n",
    "            # reduce memory, only input_ids needs more bits\n",
    "            input_ids = np.array(input_ids, dtype=np.int32)\n",
    "            attention_mask = np.array(attention_mask, dtype=np.bool)\n",
    "            token_type_ids = np.array(token_type_ids, dtype=np.uint8)\n",
    "            # p_mask = np.array(p_mask, dtype=np.bool)\n",
    "\n",
    "            if is_training and short_is_impossible:\n",
    "                start_position = CLS_INDEX\n",
    "                end_position = CLS_INDEX\n",
    "\n",
    "            if is_training and long_is_impossible:\n",
    "                long_position = CLS_INDEX\n",
    "\n",
    "            if example_index in (0, 10):\n",
    "                # too spammy otherwise\n",
    "                if doc_span_index in (0, 5):\n",
    "                    logger.info(\"*** Example ***\")\n",
    "                    logger.info(\"unique_id: %s\" % (unique_id))\n",
    "                    logger.info(\"example_index: %s\" % (example_index))\n",
    "                    logger.info(\"doc_span_index: %s\" % (doc_span_index))\n",
    "                    logger.info(\"tokens: %s\" % \" \".join(tokens))\n",
    "                    # logger.info(\"token_to_orig_map: %s\" % \" \".join([\n",
    "                    #     \"%d:%d\" % (x, y) for (x, y) in enumerate(token_to_orig_map)]))\n",
    "                    # logger.info(\"token_is_max_context: %s\" % \" \".join([\n",
    "                    #     \"%d:%s\" % (x, y) for (x, y) in enumerate(token_is_max_context)\n",
    "                    # ]))\n",
    "                    logger.info(\"input_ids: %s\" % input_ids)\n",
    "                    logger.info(\"attention_mask: %s\" % np.uint8(attention_mask))\n",
    "                    logger.info(\"token_type_ids: %s\" % token_type_ids)\n",
    "                    if is_training and short_is_impossible:\n",
    "                        logger.info(\"short impossible example\")\n",
    "                    if is_training and long_is_impossible:\n",
    "                        logger.info(\"long impossible example\")\n",
    "                    if is_training and not short_is_impossible:\n",
    "                        answer_text = \" \".join(tokens[start_position: end_position + 1])\n",
    "                        logger.info(\"start_position: %d\" % (start_position))\n",
    "                        logger.info(\"end_position: %d\" % (end_position))\n",
    "                        logger.info(\"answer: %s\" % (answer_text))\n",
    "\n",
    "            if short_is_impossible:\n",
    "                num_short_neg += 1\n",
    "            else:\n",
    "                num_short_pos += 1\n",
    "\n",
    "            if long_is_impossible:\n",
    "                num_long_neg += 1\n",
    "            else:\n",
    "                num_long_pos += 1\n",
    "\n",
    "            crop = Crop(\n",
    "                unique_id=unique_id,\n",
    "                example_index=example_index,\n",
    "                doc_span_index=doc_span_index,\n",
    "                tokens=tokens,\n",
    "                token_to_orig_map=token_to_orig_map,\n",
    "                token_is_max_context=token_is_max_context,\n",
    "                input_ids=input_ids,\n",
    "                attention_mask=attention_mask,\n",
    "                token_type_ids=token_type_ids,\n",
    "                # p_mask=p_mask,\n",
    "                paragraph_len=paragraph_len,\n",
    "                start_position=start_position,\n",
    "                end_position=end_position,\n",
    "                long_position=long_position,\n",
    "                short_is_impossible=short_is_impossible,\n",
    "                long_is_impossible=long_is_impossible)\n",
    "            crops.append(crop)\n",
    "            unique_id += 1\n",
    "\n",
    "    return crops"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "#### convert_examples_to_crops()函数\n",
    "    输入: NQExample + 分词器 + 最大序列长度 + 文本步长 + 最大问题长度 + \n",
    "    训练标识 + 分类token[CLS] + 分句token[SEP] + 一些预设参数\n",
    "    过程: 对读取到的每个NQExample逐一进行crop: \n",
    "    1. 对问题文本question_text进行tokenize, max_query_length为预设最大问题长度\n",
    "    (从问题tokenize后得到的全部token中选取前max_query_length个token作为结果)\n",
    "    2. 对doc_tokens中的token的起止位置进行重新标注\n",
    "    3. 对不同模型(bert和roberta)，会在输入问答的句子中加入特殊的token，加上这些\n",
    "    token后，可能会超过预设的最大序列长度，为了匹配上输入的最大序列长度，利用\n",
    "    滑动窗口的方式，对文本进行stride，即文本跳跃(默认256)\n",
    "    4. 对进行doc_stride后得到的多段doc_span进行mask，用0替代可能是答案的位置，用1\n",
    "    替代不可能是答案的位置\n",
    "    5. Tokens序列(即输入特征的格式)按以下方式构建: \n",
    "    [CLS] Query [SEP] Paragraph [SEP]\n",
    "    按构建顺序将[CLS] paragraph [SEP] 加进tokens列表，并在token_type_ids中指示\n",
    "    他们的token类型。然后匹配所有token在词汇表中的索引，并构建attention_mask。\n",
    "    ps: 在构建crops的过程中，以问题是否可答为标识，统计正反例的数目。\n",
    "    输出: Crop命名元组格式的数据\n",
    "    作用: 完成源数据到特征的转换"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "#### check_is_max_context()函数"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "def check_is_max_context(doc_spans, cur_span_index, position):\n",
    "    \"\"\"Check if this is the 'max context' doc span for the token.\"\"\"\n",
    "\n",
    "    # Because of the sliding window approach taken to scoring documents, a single\n",
    "    # token can appear in multiple documents. E.g.\n",
    "    #  Doc: the man went to the store and bought a gallon of milk\n",
    "    #  Span A: the man went to the\n",
    "    #  Span B: to the store and bought\n",
    "    #  Span C: and bought a gallon of\n",
    "    #  ...\n",
    "    #\n",
    "    # Now the word 'bought' will have two scores from spans B and C. We only\n",
    "    # want to consider the score with \"maximum context\", which we define as\n",
    "    # the *minimum* of its left and right context (the *sum* of left and\n",
    "    # right context will always be the same, of course).\n",
    "    #\n",
    "    # In the example the maximum context for 'bought' would be span C since\n",
    "    # it has 1 left context and 3 right context, while span B has 4 left context\n",
    "    # and 0 right context.\n",
    "    best_score = None\n",
    "    best_span_index = None\n",
    "    for (span_index, doc_span) in enumerate(doc_spans):\n",
    "        end = doc_span.start + doc_span.length - 1\n",
    "        if position < doc_span.start:\n",
    "            continue\n",
    "        if position > end:\n",
    "            continue\n",
    "        num_left_context = position - doc_span.start\n",
    "        num_right_context = end - position\n",
    "        score = min(num_left_context, num_right_context) + 0.01 * doc_span.length\n",
    "        if best_score is None or score > best_score:\n",
    "            best_score = score\n",
    "            best_span_index = span_index\n",
    "\n",
    "    return cur_span_index == best_span_index"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "#### check_is_max_context()函数\n",
    "    输入: doc_spans + 当前doc_span的下标 + position\n",
    "    过程: 根据当前需要判断的token，计算它在不同doc_span中的得分:\n",
    "    得分考量: 上下相关context的数目、doc_span的数目\n",
    "    根据得分标准获取最佳得分的doc_span\n",
    "    输出: bool值(当前doc_span就是最佳得分doc_span时，bool值为真)\n",
    "    作用: 检查当前的位置的token在当前doc_span里面是否是上下文最全的\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "#### 声明两个命名元组"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "# Lightweight containers used while decoding model logits into answers.\n",
    "\n",
    "# A raw candidate span scored directly from the start/end (or long) logits.\n",
    "PrelimPrediction = collections.namedtuple(\"PrelimPrediction\",\n",
    "    [\"crop_index\", \"start_index\", \"end_index\", \"start_logit\", \"end_logit\"])\n",
    "\n",
    "# A deduplicated, text-resolved candidate kept in the n-best list;\n",
    "# orig_doc_start/orig_doc_end map the span back to the original document.\n",
    "NbestPrediction = collections.namedtuple(\"NbestPrediction\", [\n",
    "    \"text\", \"start_logit\", \"end_logit\",\n",
    "    \"start_index\", \"end_index\",\n",
    "    \"orig_doc_start\", \"orig_doc_end\", \"crop_index\"])\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "#### 两个命名元组的构成\n",
    "\n",
    "- PrelimPrediction\n",
    "      crop_index\n",
    "      start_index\n",
    "      end_index\n",
    "      start_logit\n",
    "      end_logit\n",
    "      \n",
    "- NbestPrediction\n",
    "      text\n",
    "      start_logit\n",
    "      end_logit\n",
    "      start_index\n",
    "      end_index\n",
    "      orig_doc_start\n",
    "      orig_doc_end\n",
    "      crop_index\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "#### clean_text()函数"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "def clean_text(tok_text):\n",
    "    # De-tokenize WordPieces that have been split off.\n",
    "    tok_text = tok_text.replace(\" ##\", \"\")\n",
    "    tok_text = tok_text.replace(\"##\", \"\")\n",
    "\n",
    "    # Clean whitespace\n",
    "    tok_text = tok_text.strip()\n",
    "    tok_text = \" \".join(tok_text.split())\n",
    "    return tok_text\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "#### clean_text()函数\n",
    "    输入: tok_text即crop.tokens预测部分的token转换而来的字符串\n",
    "    过程: 先去掉\" ##\" 和\"##\"，再去除前后空格再转字符串\n",
    "    输出: clean_text"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "#### get_nbest()函数"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "def get_nbest(prelim_predictions, crops, example, n_best_size):\n",
    "    \"\"\"Build the n-best list of unique answer candidates.\n",
    "\n",
    "    Walks `prelim_predictions` (assumed sorted best-first), resolves each\n",
    "    span to text, maps token indexes back to the original document and\n",
    "    deduplicates by text, stopping at `n_best_size` entries. Always\n",
    "    returns a list of `NbestPrediction` with at least one element.\n",
    "    \"\"\"\n",
    "    seen, nbest = set(), []\n",
    "    for pred in prelim_predictions:\n",
    "        if len(nbest) >= n_best_size:\n",
    "            break\n",
    "        crop = crops[pred.crop_index]\n",
    "        # -1 marks 'no span'; kept for null predictions. (The original code\n",
    "        # assigned this twice in a row; the duplicate was removed.)\n",
    "        orig_doc_start, orig_doc_end = -1, -1\n",
    "        if pred.start_index > 0:\n",
    "            # Long answer has no end_index. We still generate some text to check\n",
    "            if pred.end_index == -1:\n",
    "                tok_tokens = crop.tokens[pred.start_index: pred.start_index + 11]\n",
    "            else:\n",
    "                tok_tokens = crop.tokens[pred.start_index: pred.end_index + 1]\n",
    "            tok_text = \" \".join(tok_tokens)\n",
    "            tok_text = clean_text(tok_text)\n",
    "\n",
    "            orig_doc_start = int(crop.token_to_orig_map[pred.start_index])\n",
    "            if pred.end_index == -1:\n",
    "                orig_doc_end = orig_doc_start + 10\n",
    "            else:\n",
    "                orig_doc_end = int(crop.token_to_orig_map[pred.end_index])\n",
    "\n",
    "            final_text = tok_text\n",
    "            # Drop candidates whose text has already been kept.\n",
    "            if final_text in seen:\n",
    "                continue\n",
    "\n",
    "        else:\n",
    "            # start_index == 0 is the [CLS] position, i.e. the null answer.\n",
    "            final_text = \"\"\n",
    "\n",
    "        seen.add(final_text)\n",
    "        nbest.append(NbestPrediction(\n",
    "            text=final_text,\n",
    "            start_logit=pred.start_logit, end_logit=pred.end_logit,\n",
    "            start_index=pred.start_index, end_index=pred.end_index,\n",
    "            orig_doc_start=orig_doc_start, orig_doc_end=orig_doc_end,\n",
    "            crop_index=pred.crop_index))\n",
    "\n",
    "    # Degenerate case: guarantee downstream code always has an entry.\n",
    "    if len(nbest) in (0, 1):\n",
    "        nbest.insert(0, NbestPrediction(text=\"empty\",\n",
    "            start_logit=0.0, end_logit=0.0,\n",
    "            start_index=-1, end_index=-1,\n",
    "            orig_doc_start=-1, orig_doc_end=-1,\n",
    "            crop_index=UNMAPPED))\n",
    "\n",
    "    assert len(nbest) >= 1\n",
    "    return nbest"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "#### get_nbest()函数\n",
    "    输入: prelim_predictions + crops + example + n_best_size\n",
    "    过程: 按得分从高到低遍历初步预测，截取对应token片段并清洗成文本，\n",
    "    利用token_to_orig_map映射回原文起止位置，按文本去重，最多保留\n",
    "    n_best_size个结果；结果不足时插入一个空预测兜底\n",
    "    输出: NbestPrediction命名元组构成的列表(长度至少为1)\n",
    "    作用: 从初步预测中筛选出n个最佳候选答案"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "#### write_predictions()函数"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "def write_predictions(examples_gen, all_crops, all_results, n_best_size,\n",
    "                      max_answer_length, output_prediction_file,\n",
    "                      output_nbest_file, output_null_log_odds_file, verbose_logging,\n",
    "                      short_null_score_diff, long_null_score_diff):\n",
    "    \"\"\"Write final predictions to the json file and log-odds of null if needed.\n",
    "\n",
    "    For every example, gathers the logits of all of its crops, builds\n",
    "    n-best candidate lists for short and long answers, then compares the\n",
    "    null ([CLS]) score with the best non-null candidate: if the score\n",
    "    difference exceeds the corresponding threshold, the empty answer is\n",
    "    predicted. Returns an OrderedDict mapping qas_id to a 5-tuple\n",
    "    (short_text, short_start, short_end, long_text, long_start).\n",
    "    NOTE(review): `verbose_logging` is currently unused in this body.\n",
    "    \"\"\"\n",
    "    logger.info(\"Writing predictions to: %s\" % output_prediction_file)\n",
    "    logger.info(\"Writing nbest to: %s\" % output_nbest_file)\n",
    "\n",
    "    # create indexes: crops grouped per example, results keyed by unique_id\n",
    "    example_index_to_crops = collections.defaultdict(list)\n",
    "    for crop in all_crops:\n",
    "        example_index_to_crops[crop.example_index].append(crop)\n",
    "    unique_id_to_result = {result.unique_id: result for result in all_results}\n",
    "\n",
    "    all_predictions = collections.OrderedDict()\n",
    "    all_nbest_json = collections.OrderedDict()\n",
    "    scores_diff_json = collections.OrderedDict()\n",
    "    short_num_empty, long_num_empty = 0, 0\n",
    "    for example_index, example in enumerate(examples_gen):\n",
    "        if example_index % 1000 == 0 and example_index > 0:\n",
    "            logger.info(f'[{example_index}]: {short_num_empty} short and {long_num_empty} long empty')\n",
    "\n",
    "        crops = example_index_to_crops[example_index]\n",
    "        short_prelim_predictions, long_prelim_predictions = [], []\n",
    "        for crop_index, crop in enumerate(crops):\n",
    "            assert crop.unique_id in unique_id_to_result, f\"{crop.unique_id}\"\n",
    "            result = unique_id_to_result[crop.unique_id]\n",
    "            # get the `n_best_size` largest indexes\n",
    "            # https://stackoverflow.com/questions/6910641/how-do-i-get-indices-of-n-maximum-values-in-a-numpy-array#23734295\n",
    "            start_indexes = np.argpartition(result.start_logits, -n_best_size)[-n_best_size:]\n",
    "            start_indexes = [int(x) for x in start_indexes]\n",
    "            end_indexes = np.argpartition(result.end_logits, -n_best_size)[-n_best_size:]\n",
    "            end_indexes = [int(x) for x in end_indexes]\n",
    "\n",
    "            # create short answers: every valid (start, end) pair within limits\n",
    "            for start_index in start_indexes:\n",
    "                if start_index >= len(crop.tokens):\n",
    "                    continue\n",
    "                # this skips [CLS] i.e. null prediction\n",
    "                if crop.token_to_orig_map[start_index] == UNMAPPED:\n",
    "                    continue\n",
    "                if not crop.token_is_max_context[start_index]:\n",
    "                    continue\n",
    "\n",
    "                for end_index in end_indexes:\n",
    "                    if end_index >= len(crop.tokens):\n",
    "                        continue\n",
    "                    if crop.token_to_orig_map[end_index] == UNMAPPED:\n",
    "                        continue\n",
    "                    if end_index < start_index:\n",
    "                        continue\n",
    "                    length = end_index - start_index + 1\n",
    "                    if length > max_answer_length:\n",
    "                        continue\n",
    "\n",
    "                    short_prelim_predictions.append(PrelimPrediction(\n",
    "                        crop_index=crop_index,\n",
    "                        start_index=start_index,\n",
    "                        end_index=end_index,\n",
    "                        start_logit=result.start_logits[start_index],\n",
    "                        end_logit=result.end_logits[end_index]))\n",
    "\n",
    "            # create long answer candidates; they only carry a start position\n",
    "            long_indexes = np.argpartition(result.long_logits, -n_best_size)[-n_best_size:].tolist()\n",
    "            for long_index in long_indexes:\n",
    "                if long_index >= len(crop.tokens):\n",
    "                    continue\n",
    "                # this skips [CLS] i.e. null prediction\n",
    "                if crop.token_to_orig_map[long_index] == UNMAPPED:\n",
    "                    continue\n",
    "                # TODO(see--): Is this needed?\n",
    "                # -> Yep helps both short and long by about 0.1\n",
    "                if not crop.token_is_max_context[long_index]:\n",
    "                    continue\n",
    "                long_prelim_predictions.append(PrelimPrediction(\n",
    "                    crop_index=crop_index,\n",
    "                    start_index=long_index, end_index=-1,\n",
    "                    start_logit=result.long_logits[long_index],\n",
    "                    end_logit=result.long_logits[long_index]))\n",
    "\n",
    "        # rank short candidates by joint start+end score\n",
    "        short_prelim_predictions = sorted(short_prelim_predictions,\n",
    "            key=lambda x: x.start_logit + x.end_logit, reverse=True)\n",
    "\n",
    "        short_nbest = get_nbest(short_prelim_predictions, crops,\n",
    "            example, n_best_size)\n",
    "\n",
    "        # first non-empty entry is the best non-null short answer\n",
    "        short_best_non_null = None\n",
    "        for entry in short_nbest:\n",
    "            if short_best_non_null is None:\n",
    "                if entry.text != \"\":\n",
    "                    short_best_non_null = entry\n",
    "\n",
    "        # rank long candidates by their single start score\n",
    "        long_prelim_predictions = sorted(long_prelim_predictions,\n",
    "            key=lambda x: x.start_logit, reverse=True)\n",
    "\n",
    "        long_nbest = get_nbest(long_prelim_predictions, crops,\n",
    "            example, n_best_size)\n",
    "\n",
    "        long_best_non_null = None\n",
    "        for entry in long_nbest:\n",
    "            if long_best_non_null is None:\n",
    "                if entry.text != \"\":\n",
    "                    long_best_non_null = entry\n",
    "\n",
    "        nbest_json = {'short': [], 'long': []}\n",
    "        for kk, entries in [('short', short_nbest), ('long', long_nbest)]:\n",
    "            for i, entry in enumerate(entries):\n",
    "                output = {}\n",
    "                output[\"text\"] = entry.text\n",
    "                output[\"start_logit\"] = entry.start_logit\n",
    "                output[\"end_logit\"] = entry.end_logit\n",
    "                output[\"start_index\"] = entry.start_index\n",
    "                output[\"end_index\"] = entry.end_index\n",
    "                output[\"orig_doc_start\"] = entry.orig_doc_start\n",
    "                output[\"orig_doc_end\"] = entry.orig_doc_end\n",
    "                nbest_json[kk].append(output)\n",
    "\n",
    "        assert len(nbest_json['short']) >= 1\n",
    "        assert len(nbest_json['long']) >= 1\n",
    "\n",
    "        # We use the [CLS] score of the crop that has the maximum positive score\n",
    "        # long_score_diff = min_long_score_null - long_best_non_null.start_logit\n",
    "        # Predict \"\" if null score - the score of best non-null > threshold\n",
    "        # NOTE(review): the broad except also covers the AttributeError raised\n",
    "        # when short_best_non_null is None; an explicit None check + logger\n",
    "        # would be clearer than print(e).\n",
    "        try:\n",
    "            crop_unique_id = crops[short_best_non_null.crop_index].unique_id\n",
    "            start_score_null = unique_id_to_result[crop_unique_id].start_logits[CLS_INDEX]\n",
    "            end_score_null = unique_id_to_result[crop_unique_id].end_logits[CLS_INDEX]\n",
    "            short_score_null = start_score_null + end_score_null\n",
    "            short_score_diff = short_score_null - (short_best_non_null.start_logit +\n",
    "                short_best_non_null.end_logit)\n",
    "\n",
    "            if short_score_diff > short_null_score_diff:\n",
    "                final_pred = (\"\", -1, -1)\n",
    "                short_num_empty += 1\n",
    "            else:\n",
    "                final_pred = (short_best_non_null.text, short_best_non_null.orig_doc_start,\n",
    "                    short_best_non_null.orig_doc_end)\n",
    "        except Exception as e:\n",
    "            print(e)\n",
    "            final_pred = (\"\", -1, -1)\n",
    "            short_num_empty += 1\n",
    "\n",
    "        try:\n",
    "            long_score_null = unique_id_to_result[crops[\n",
    "                long_best_non_null.crop_index].unique_id].long_logits[CLS_INDEX]\n",
    "            long_score_diff = long_score_null - long_best_non_null.start_logit\n",
    "            scores_diff_json[example.qas_id] = {'short_score_diff': short_score_diff,\n",
    "                'long_score_diff': long_score_diff}\n",
    "\n",
    "            if long_score_diff > long_null_score_diff:\n",
    "                final_pred += (\"\", -1)\n",
    "                long_num_empty += 1\n",
    "                # print(f\"LONG EMPTY: {round(long_score_null, 2)} vs \"\n",
    "                #     f\"{round(long_best_non_null.start_logit, 2)} (th {long_null_score_diff})\")\n",
    "\n",
    "            else:\n",
    "                final_pred += (long_best_non_null.text, long_best_non_null.orig_doc_start)\n",
    "\n",
    "        except Exception as e:\n",
    "            print(e)\n",
    "            final_pred += (\"\", -1)\n",
    "            long_num_empty += 1\n",
    "\n",
    "        all_predictions[example.qas_id] = final_pred\n",
    "        all_nbest_json[example.qas_id] = nbest_json\n",
    "\n",
    "    if output_prediction_file is not None:\n",
    "        with open(output_prediction_file, \"w\") as writer:\n",
    "            writer.write(json.dumps(all_predictions, indent=2))\n",
    "\n",
    "    if output_nbest_file is not None:\n",
    "        with open(output_nbest_file, \"w\") as writer:\n",
    "            writer.write(json.dumps(all_nbest_json, indent=2))\n",
    "\n",
    "    if output_null_log_odds_file is not None:\n",
    "        with open(output_null_log_odds_file, \"w\") as writer:\n",
    "            writer.write(json.dumps(scores_diff_json, indent=2))\n",
    "\n",
    "    # NOTE(review): `example_index` is unbound here if `examples_gen` is empty.\n",
    "    logger.info(f'{short_num_empty} short and {long_num_empty} long empty of'\n",
    "        f' {example_index}')\n",
    "    return all_predictions"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "#### write_predictions()函数\n",
    "    输入: examples_gen + all_crops + all_results + n_best_size等解码参数，\n",
    "    以及输出文件路径和空答案阈值\n",
    "    过程: 对每个example收集其所有crop的logits，分别构建短答案和长答案的\n",
    "    初步预测并经get_nbest筛选；再将[CLS]位置的空答案得分与最佳非空答案\n",
    "    得分之差和阈值比较，超过阈值则预测为空\n",
    "    输出: qas_id到最终预测五元组的有序字典，并可写出预测json文件\n",
    "    作用: 将模型输出的logits解码为最终预测结果"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "#### convert_preds_to_df()函数"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "def convert_preds_to_df(preds, candidates):\n",
    "  num_found_long, num_searched_long = 0, 0\n",
    "  df = {'example_id': [], 'PredictionString': []}\n",
    "  for example_id, pred in preds.items():\n",
    "    short_text, start_token, end_token, long_text, long_token = pred\n",
    "    df['example_id'].append(example_id + '_short')\n",
    "    short_answer = ''\n",
    "    if start_token != -1:\n",
    "      # +1 is required to make the token inclusive\n",
    "      short_answer = f'{start_token}:{end_token + 1}'\n",
    "    df['PredictionString'].append(short_answer)\n",
    "\n",
    "    # print(entry['document_text'].split(' ')[start_token: end_token + 1])\n",
    "    # find the long answer\n",
    "    long_answer = ''\n",
    "    found_long = False\n",
    "    min_dist = 1_000_000\n",
    "    if long_token != -1:\n",
    "      num_searched_long += 1\n",
    "      for candidate in candidates[example_id]:\n",
    "        cstart, cend = candidate.start_token, candidate.end_token\n",
    "        dist = abs(cstart - long_token)\n",
    "        if dist < min_dist:\n",
    "          min_dist = dist\n",
    "        if long_token == cstart:\n",
    "          long_answer = f'{cstart}:{cend}'\n",
    "          found_long = True\n",
    "          break\n",
    "\n",
    "      if found_long:\n",
    "        num_found_long += 1\n",
    "      else:\n",
    "        logger.info(f\"Not found: {min_dist}\")\n",
    "\n",
    "    df['example_id'].append(example_id + '_long')\n",
    "    df['PredictionString'].append(long_answer)\n",
    "\n",
    "  df = pd.DataFrame(df)\n",
    "  print(f'Found {num_found_long} of {num_searched_long} (total {len(preds)})')\n",
    "  return df"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "#### convert_preds_to_df()函数\n",
    "    输入: 预测preds + 候选答案candidates\n",
    "    过程: 从preds中读取example的id和对应的预测字串。若问题可回答，\n",
    "    对于短回答，输出start_token和end_token范围内的token对应的字符串即可。\n",
    "    对于长回答，需要在候选列表中进行搜寻，找出long_token==cstart的那一个\n",
    "    回答进行输出。(这里选择的原因不太明白)\n",
    "    输出: 输出的表格共有两列，分别是example的id和预测的字符串\n",
    "    作用: 输出最终的预测结果(以表格形式)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 总结\n",
    "\n",
    "- \n",
    "- \n",
    "- \n",
    "- \n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  },
  "pycharm": {
   "stem_cell": {
    "cell_type": "raw",
    "source": [],
    "metadata": {
     "collapsed": false
    }
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}