{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# default_exp bert_preprocessing.bert_utils\n",
    "%load_ext autoreload\n",
    "%autoreload 2"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Bert Utils\n",
    "\n",
    "Utility functions for BERT pre-training data preparation, adapted from the original Google BERT code base."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# export\n",
    "import random\n",
    "import collections\n",
    "\n",
    "\n",
    "def _truncate_seq_pair(tokens_a, tokens_b, max_length, rng):\n",
    "    \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n",
    "\n",
    "    # This is a simple heuristic which will always truncate the longer sequence\n",
    "    # one token at a time. This makes more sense than truncating an equal percent\n",
    "    # of tokens from each, since if one sequence is very short then each token\n",
    "    # that's truncated likely contains more information than a longer sequence.\n",
    "    if rng is None:\n",
    "        while True:\n",
    "            total_length = len(tokens_a) + len(tokens_b)\n",
    "            if total_length <= max_length:\n",
    "                break\n",
    "            if len(tokens_a) > len(tokens_b):\n",
    "                tokens_a.pop()\n",
    "            else:\n",
    "                tokens_b.pop()\n",
    "    else:\n",
    "        while True:\n",
    "            total_length = len(tokens_a) + len(tokens_b)\n",
    "            if total_length <= max_length:\n",
    "                break\n",
    "\n",
    "            trunc_tokens = tokens_a if len(\n",
    "                tokens_a) > len(tokens_b) else tokens_b\n",
    "            assert len(trunc_tokens) >= 1\n",
    "\n",
    "            # We want to sometimes truncate from the front and sometimes from the\n",
    "            # back to add more randomness and avoid biases.\n",
    "            if rng.random() < 0.5:\n",
    "                del trunc_tokens[0]\n",
    "            else:\n",
    "                trunc_tokens.pop()\n",
    "\n",
    "\n",
    "def truncate_seq_pair(tokens_a, tokens_b, target, max_length, rng=None, is_seq=False):\n",
    "    if tokens_b is None:\n",
    "        if len(tokens_a) > max_length - 2:\n",
    "            tokens_a = tokens_a[0:(max_length - 2)]\n",
    "            if is_seq:\n",
    "                target = target[0:(max_length - 2)]\n",
    "\n",
    "    else:\n",
    "        _truncate_seq_pair(tokens_a, tokens_b, max_length-3, rng)\n",
    "\n",
    "    return tokens_a, tokens_b, target\n",
    "\n",
    "\n",
    "def punc_augument(raw_inputs, params):\n",
    "    \"\"\"This code is dedicated in memory of a special time.\n",
    "    \"\"\"\n",
    "    for char_ind, char in enumerate(raw_inputs):\n",
    "        if char in params.punc_list:\n",
    "            if random.uniform(0, 1) <= params.punc_replace_prob:\n",
    "                raw_inputs[char_ind] = random.choice(params.punc_list)\n",
    "\n",
    "    return raw_inputs\n",
    "\n",
    "# The following functions are adapted from the original BERT repository's\n",
    "# pre-training data pipeline (create_pretraining_data.py).\n",
    "\n",
    "\n",
    "def create_instances_from_document(\n",
    "        all_documents, document_index, max_seq_length, short_seq_prob,\n",
    "        masked_lm_prob, max_predictions_per_seq, vocab_words, rng):\n",
    "    \"\"\"Creates `TrainingInstance`s for a single document.\n",
    "\n",
    "    Args:\n",
    "        all_documents: list of documents; each document is a list of segments\n",
    "            and each segment is a list of tokens.\n",
    "        document_index: index of the document to draw instances from.\n",
    "        max_seq_length: hard limit on tokens per instance, including [CLS]\n",
    "            and the two [SEP] markers.\n",
    "        short_seq_prob: probability of targeting a shorter sequence to reduce\n",
    "            the pre-training / fine-tuning length mismatch.\n",
    "        masked_lm_prob: fraction of tokens to mask for the LM objective.\n",
    "        max_predictions_per_seq: cap on masked positions per instance.\n",
    "        vocab_words: vocabulary list used for random token replacement.\n",
    "        rng: random.Random instance driving all sampling.\n",
    "\n",
    "    Returns:\n",
    "        A list of TrainingInstance namedtuples.\n",
    "    \"\"\"\n",
    "    document = all_documents[document_index]\n",
    "\n",
    "    # Account for [CLS], [SEP], [SEP]\n",
    "    max_num_tokens = max_seq_length - 3\n",
    "\n",
    "    # We *usually* want to fill up the entire sequence since we are padding\n",
    "    # to `max_seq_length` anyways, so short sequences are generally wasted\n",
    "    # computation. However, we *sometimes*\n",
    "    # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter\n",
    "    # sequences to minimize the mismatch between pre-training and fine-tuning.\n",
    "    # The `target_seq_length` is just a rough target however, whereas\n",
    "    # `max_seq_length` is a hard limit.\n",
    "    target_seq_length = max_num_tokens\n",
    "    if rng.random() < short_seq_prob:\n",
    "        target_seq_length = rng.randint(2, max_num_tokens)\n",
    "\n",
    "    # We DON'T just concatenate all of the tokens from a document into a long\n",
    "    # sequence and choose an arbitrary split point because this would make the\n",
    "    # next sentence prediction task too easy. Instead, we split the input into\n",
    "    # segments \"A\" and \"B\" based on the actual \"sentences\" provided by the user\n",
    "    # input.\n",
    "    instances = []\n",
    "    current_chunk = []\n",
    "    current_length = 0\n",
    "    i = 0\n",
    "    while i < len(document):\n",
    "        segment = document[i]\n",
    "        current_chunk.append(segment)\n",
    "        current_length += len(segment)\n",
    "        if i == len(document) - 1 or current_length >= target_seq_length:\n",
    "            if current_chunk:\n",
    "                # `a_end` is how many segments from `current_chunk` go into the `A`\n",
    "                # (first) sentence.\n",
    "                a_end = 1\n",
    "                if len(current_chunk) >= 2:\n",
    "                    a_end = rng.randint(1, len(current_chunk) - 1)\n",
    "\n",
    "                tokens_a = []\n",
    "                for j in range(a_end):\n",
    "                    tokens_a.extend(current_chunk[j])\n",
    "\n",
    "                tokens_b = []\n",
    "                # Random next\n",
    "                is_random_next = False\n",
    "                if len(current_chunk) == 1 or rng.random() < 0.5:\n",
    "                    is_random_next = True\n",
    "                    target_b_length = target_seq_length - len(tokens_a)\n",
    "\n",
    "                    # This should rarely go for more than one iteration for large\n",
    "                    # corpora. However, just to be careful, we try to make sure that\n",
    "                    # the random document is not the same as the document\n",
    "                    # we're processing.\n",
    "                    # NOTE: if all_documents holds a single document, every try\n",
    "                    # fails and the \"random\" document is the current one.\n",
    "                    for _ in range(10):\n",
    "                        random_document_index = rng.randint(\n",
    "                            0, len(all_documents) - 1)\n",
    "                        if random_document_index != document_index:\n",
    "                            break\n",
    "\n",
    "                    random_document = all_documents[random_document_index]\n",
    "                    random_start = rng.randint(0, len(random_document) - 1)\n",
    "                    for j in range(random_start, len(random_document)):\n",
    "                        tokens_b.extend(random_document[j])\n",
    "                        if len(tokens_b) >= target_b_length:\n",
    "                            break\n",
    "                    # We didn't actually use these segments so we \"put them back\" so\n",
    "                    # they don't go to waste.\n",
    "                    num_unused_segments = len(current_chunk) - a_end\n",
    "                    i -= num_unused_segments\n",
    "                # Actual next\n",
    "                else:\n",
    "                    is_random_next = False\n",
    "                    for j in range(a_end, len(current_chunk)):\n",
    "                        tokens_b.extend(current_chunk[j])\n",
    "                # Truncate the pair to the shared token budget, in place.\n",
    "                # BUGFIX: call _truncate_seq_pair directly. The old call went\n",
    "                # through truncate_seq_pair(), which subtracts another 3 from\n",
    "                # max_length, so pairs were truncated to max_seq_length - 6 and\n",
    "                # three usable positions per instance were wasted. Truncating\n",
    "                # straight to max_num_tokens matches upstream BERT\n",
    "                # create_pretraining_data.py.\n",
    "                _truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng)\n",
    "                # Skip degenerate pairs where either side ended up empty.\n",
    "                if len(tokens_a) < 1 or len(tokens_b) < 1:\n",
    "                    current_chunk = []\n",
    "                    current_length = 0\n",
    "                    i += 1\n",
    "                    continue\n",
    "                assert len(tokens_a) >= 1, tokens_a\n",
    "                assert len(tokens_b) >= 1, tokens_b\n",
    "\n",
    "                # Assemble [CLS] A [SEP] B [SEP]; segment ids 0 for A, 1 for B.\n",
    "                tokens = []\n",
    "                segment_ids = []\n",
    "                tokens.append(\"[CLS]\")\n",
    "                segment_ids.append(0)\n",
    "                for token in tokens_a:\n",
    "                    tokens.append(token)\n",
    "                    segment_ids.append(0)\n",
    "\n",
    "                tokens.append(\"[SEP]\")\n",
    "                segment_ids.append(0)\n",
    "\n",
    "                for token in tokens_b:\n",
    "                    tokens.append(token)\n",
    "                    segment_ids.append(1)\n",
    "                tokens.append(\"[SEP]\")\n",
    "                segment_ids.append(1)\n",
    "\n",
    "                (tokens, masked_lm_positions,\n",
    "                 masked_lm_labels) = create_masked_lm_predictions(\n",
    "                     tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)\n",
    "                instance = TrainingInstance(\n",
    "                    tokens=tokens,\n",
    "                    segment_ids=segment_ids,\n",
    "                    is_random_next=is_random_next,\n",
    "                    masked_lm_positions=masked_lm_positions,\n",
    "                    masked_lm_labels=masked_lm_labels)\n",
    "                instances.append(instance)\n",
    "            current_chunk = []\n",
    "            current_length = 0\n",
    "        i += 1\n",
    "\n",
    "    return instances\n",
    "\n",
    "\n",
    "# A single position chosen for the masked-LM objective: `index` is the\n",
    "# token's position in the sequence, `label` is the original token the model\n",
    "# must predict there.\n",
    "MaskedLmInstance = collections.namedtuple(\"MaskedLmInstance\",\n",
    "                                          [\"index\", \"label\"])\n",
    "# One pre-training example: the (masked) token sequence, its segment ids,\n",
    "# whether segment B came from a random document (next-sentence label), and\n",
    "# the masked-LM positions/labels from create_masked_lm_predictions.\n",
    "TrainingInstance = collections.namedtuple(\"TrainingInstance\",\n",
    "                                          ['tokens', 'segment_ids',\n",
    "                                           'masked_lm_positions',\n",
    "                                           'masked_lm_labels',\n",
    "                                           'is_random_next'])\n",
    "\n",
    "def create_masked_lm_predictions(tokens, masked_lm_prob,\n",
    "                                 max_predictions_per_seq, vocab_words, rng):\n",
    "    \"\"\"Selects positions for the masked-LM objective and applies BERT's\n",
    "    80/10/10 replacement scheme.\n",
    "\n",
    "    Returns a tuple (output_tokens, masked_lm_positions, masked_lm_labels):\n",
    "    a copy of `tokens` with the chosen positions replaced, the chosen\n",
    "    positions in ascending order, and the original tokens at those positions.\n",
    "    \"\"\"\n",
    "    # Every position except the special [CLS]/[SEP] markers is a candidate.\n",
    "    cand_indexes = [pos for pos, tok in enumerate(tokens)\n",
    "                    if tok not in (\"[CLS]\", \"[SEP]\")]\n",
    "    rng.shuffle(cand_indexes)\n",
    "\n",
    "    output_tokens = list(tokens)\n",
    "\n",
    "    # Mask ~masked_lm_prob of the tokens: at least one, and never more than\n",
    "    # max_predictions_per_seq.\n",
    "    num_to_predict = min(max_predictions_per_seq,\n",
    "                         max(1, int(round(len(tokens) * masked_lm_prob))))\n",
    "\n",
    "    masked_lms = []\n",
    "    covered_indexes = set()\n",
    "    for pos in cand_indexes:\n",
    "        if len(masked_lms) >= num_to_predict:\n",
    "            break\n",
    "        if pos in covered_indexes:\n",
    "            continue\n",
    "        covered_indexes.add(pos)\n",
    "\n",
    "        # 80%: [MASK]; 10%: keep the original token; 10%: random vocab word.\n",
    "        if rng.random() < 0.8:\n",
    "            replacement = \"[MASK]\"\n",
    "        elif rng.random() < 0.5:\n",
    "            replacement = tokens[pos]\n",
    "        else:\n",
    "            replacement = vocab_words[rng.randint(0, len(vocab_words) - 1)]\n",
    "\n",
    "        output_tokens[pos] = replacement\n",
    "        masked_lms.append(MaskedLmInstance(index=pos, label=tokens[pos]))\n",
    "\n",
    "    masked_lms.sort(key=lambda entry: entry.index)\n",
    "    masked_lm_positions = [entry.index for entry in masked_lms]\n",
    "    masked_lm_labels = [entry.label for entry in masked_lms]\n",
    "\n",
    "    return (output_tokens, masked_lm_positions, masked_lm_labels)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
