{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import AutoTokenizer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'transformers.tokenization_utils_base.BatchEncoding'>\n",
      "{'input_ids': [101, 8667, 1362, 106, 102], 'token_type_ids': [0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1]}\n",
      "tokenizer.is_fast: True\n",
      "encoding.is_fast: True\n"
     ]
    }
   ],
   "source": [
    "example = \"Hello world!\"\n",
    "encoding = tokenizer(example)\n",
    "print(type(encoding))\n",
    "print(encoding)\n",
    "print('tokenizer.is_fast:', tokenizer.is_fast)\n",
    "print('encoding.is_fast:', encoding.is_fast)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['[CLS]', 'Hello', 'world', '!', '[SEP]']"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "encoding.tokens()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['[CLS]', 'My', 'name', 'is', 'S', '##yl', '##va', '##in', 'and', 'I', 'work', 'at', 'Hu', '##gging', 'Face', 'in', 'Brooklyn', '.', '[SEP]']\n"
     ]
    }
   ],
   "source": [
    "example = \"My name is Sylvain and I work at Hugging Face in Brooklyn.\"\n",
    "encoding = tokenizer(example)\n",
    "print(encoding.tokens())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[None, 0, 1, 2, 3, 3, 3, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 12, None]\n"
     ]
    }
   ],
   "source": [
    "print(encoding.word_ids())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "the 5th token is: ##yl\n",
      "corresponding text span is: 12 14 yl\n",
      "corresponding word span is: Sylvain\n"
     ]
    }
   ],
   "source": [
    "token_index = 5\n",
    "print('the 5th token is:', encoding.tokens()[token_index])\n",
    "start, end = encoding.token_to_chars(token_index)\n",
    "print('corresponding text span is:', start, end, example[start:end])\n",
    "word_index = encoding.word_ids()[token_index] # 3\n",
    "start, end = encoding.word_to_chars(word_index)\n",
    "print('corresponding word span is:', example[start:end])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "the 5th token is: ##yl\n",
      "corresponding word index is: 3\n",
      "the word is: Sylvain\n",
      "corresponding tokens are: ['S', '##yl', '##va', '##in']\n"
     ]
    }
   ],
   "source": [
    "token_index = 5\n",
    "print('the 5th token is:', encoding.tokens()[token_index])\n",
    "corresp_word_index = encoding.token_to_word(token_index)\n",
    "print('corresponding word index is:', corresp_word_index)\n",
    "start, end = encoding.word_to_chars(corresp_word_index)\n",
    "print('the word is:', example[start:end])\n",
    "start, end = encoding.word_to_tokens(corresp_word_index)\n",
    "print('corresponding tokens are:', encoding.tokens()[start:end])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
       "characters of \"My name is Sylvain\" are: ['M', 'y', ' ', 'n', 'a', 'm', 'e', ' ', 'i', 's', ' ', 'S', 'y', 'l', 'v', 'a', 'i', 'n']\n",
      "corresponding word index: \n",
      "\"M\": 0 \"y\": 0 \" \": None \"n\": 1 \"a\": 1 \"m\": 1 \"e\": 1 \" \": None \"i\": 2 \"s\": 2 \" \": None \"S\": 3 \"y\": 3 \"l\": 3 \"v\": 3 \"a\": 3 \"i\": 3 \"n\": 3 \n",
      "corresponding token index: \n",
      "\"M\": 1 \"y\": 1 \" \": None \"n\": 2 \"a\": 2 \"m\": 2 \"e\": 2 \" \": None \"i\": 3 \"s\": 3 \" \": None \"S\": 4 \"y\": 5 \"l\": 5 \"v\": 6 \"a\": 6 \"i\": 7 \"n\": 7 "
     ]
    }
   ],
   "source": [
    "chars = 'My name is Sylvain'\n",
     "print('characters of \"{}\" are: {}'.format(chars, list(chars)))\n",
    "print('corresponding word index: ')\n",
    "for i, c in enumerate(chars):\n",
    "    print('\"{}\": {} '.format(c, encoding.char_to_word(i)), end=\"\")\n",
    "print('\\ncorresponding token index: ')\n",
    "for i, c in enumerate(chars):\n",
    "    print('\"{}\": {} '.format(c, encoding.char_to_token(i)), end=\"\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-06-26 17:42:54.388140: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
      "To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
      "/Users/mac/Library/Python/3.9/lib/python/site-packages/threadpoolctl.py:1214: RuntimeWarning: \n",
      "Found Intel OpenMP ('libiomp') and LLVM OpenMP ('libomp') loaded at\n",
      "the same time. Both libraries are known to be incompatible and this\n",
      "can cause random crashes or deadlocks on Linux when loaded in the\n",
      "same Python program.\n",
      "Using threadpoolctl may cause crashes or deadlocks. For more\n",
      "information and possible workarounds, please see\n",
      "    https://github.com/joblib/threadpoolctl/blob/master/multiple_openmp.md\n",
      "\n",
      "  warnings.warn(msg, RuntimeWarning)\n",
      "No model was supplied, defaulted to distilbert/distilbert-base-cased-distilled-squad and revision 626af31 (https://hf-mirror.com/distilbert/distilbert-base-cased-distilled-squad).\n",
      "Using a pipeline without specifying a model name and revision in production is not recommended.\n",
      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
      "To disable this warning, you can either:\n",
      "\t- Avoid using `tokenizers` before the fork if possible\n",
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "c5fa09ebe4b546d99c7d6d995182cac6",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "config.json:   0%|          | 0.00/473 [00:00<?, ?B/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "3bb7d8aa678c4ad6a211650ae5e3f1d7",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "model.safetensors:   0%|          | 0.00/261M [00:00<?, ?B/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "a024ebfbda234d3997d422a93dc97ffd",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "tokenizer_config.json:   0%|          | 0.00/49.0 [00:00<?, ?B/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "c190cdcdb3b54c61bf6f22ce72e4e70b",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "vocab.txt: 0.00B [00:00, ?B/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "47fe1307e64f4744abf75d4a9cd554c6",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "tokenizer.json: 0.00B [00:00, ?B/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'score': 0.9741137027740479, 'start': 76, 'end': 104, 'answer': 'Jax, PyTorch, and TensorFlow'}\n"
     ]
    }
   ],
   "source": [
    "from transformers import pipeline\n",
    "\n",
    "question_answerer = pipeline(\"question-answering\")\n",
    "context = \"\"\"\n",
    "Transformers is backed by the three most popular deep learning libraries — Jax, PyTorch, and TensorFlow — with a seamless integration between them. It's straightforward to train your models with one before loading them for inference with the other.\n",
    "\"\"\"\n",
    "question = \"Which deep learning libraries back Transformers?\"\n",
    "results = question_answerer(question=question, context=context)\n",
    "print(results)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "61b08777c00c442db0b91f3bb3fa9567",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "tokenizer_config.json:   0%|          | 0.00/49.0 [00:00<?, ?B/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "8394fe274eeb44e5b65811c754976d5e",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "config.json:   0%|          | 0.00/473 [00:00<?, ?B/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "558a6b61ec714048891b6d6d6aaaa18d",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "vocab.txt: 0.00B [00:00, ?B/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "276a144693df4a2394e4a08ca1f59d15",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "tokenizer.json: 0.00B [00:00, ?B/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "cb66e728e49d404a8c2323bd16848bd0",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "model.safetensors:   0%|          | 0.00/261M [00:00<?, ?B/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "inputs {'input_ids': tensor([[  101,  5979,  1996,  3776,  9818,  1171, 25267,   136,   102, 25267,\n",
      "          1110,  5534,  1118,  1103,  1210,  1211,  1927,  1996,  3776,  9818,\n",
      "           783, 13612,   117,   153,  1183,  1942,  1766,  1732,   117,  1105,\n",
      "          5157, 21484,  2271,  6737,   783,  1114,   170,  2343,  1306,  2008,\n",
      "          9111,  1206,  1172,   119,  1135,   112,   188, 21546,  1106,  2669,\n",
      "          1240,  3584,  1114,  1141,  1196, 10745,  1172,  1111,  1107, 16792,\n",
      "          1114,  1103,  1168,   119,   102]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])}\n",
      "outputs QuestionAnsweringModelOutput(loss=None, start_logits=tensor([[-3.9582, -5.9036, -3.9443, -6.2182, -6.4083, -7.1622, -6.0466, -5.1919,\n",
      "         -4.0218,  1.1040, -3.9652, -1.5413, -2.2242,  3.1515,  6.2945, -0.4716,\n",
      "         -1.4832, -0.5067, -4.3221, -1.6551,  2.5779, 10.9044, -0.6544,  2.6956,\n",
      "         -1.1208, -1.6860, -3.7357, -1.6676, -1.5421, -1.8649,  2.0896, -1.2079,\n",
      "         -0.7890,  0.0215, -1.3682, -3.5892, -4.3107, -3.8289, -7.1438, -5.9742,\n",
      "         -3.7412, -5.6779, -4.2294, -4.4258, -2.2509, -6.1912, -7.2860, -3.6947,\n",
      "         -6.6102, -3.8975, -3.4443, -2.6780, -7.3615, -4.1177, -6.7804, -4.3929,\n",
      "         -6.6828, -7.4341, -5.9426, -6.6557, -8.2156, -6.9574, -6.2020, -6.1046,\n",
      "         -4.0218]], grad_fn=<CloneBackward0>), end_logits=tensor([[-1.7854, -6.2361, -6.2518, -5.3445, -5.2671, -8.1038, -5.0321, -5.9211,\n",
      "         -3.2731, -0.7021, -6.1406, -4.3293, -5.8735, -4.2517,  4.9747, -3.4800,\n",
      "          0.0339, -3.4037, -1.4726,  2.4518, -0.8068,  2.2278,  0.7126, -0.5041,\n",
      "          0.2587, -0.3865, -0.6514,  4.5269,  2.0128,  0.7227,  2.0128,  2.6679,\n",
      "          1.7589, 11.4564,  7.0150, -4.5284, -6.1090, -5.9652, -6.3215, -4.4237,\n",
      "         -2.4921, -3.5017,  2.8051,  0.4060, -5.7715, -6.6872, -7.4680, -4.7417,\n",
      "         -8.0210, -4.9343, -4.8170, -1.4989, -7.1752, -1.5023, -6.4373, -5.4378,\n",
      "         -3.4248, -7.3257, -7.2739, -3.7352, -6.9190, -6.4329, -1.2230, -0.7769,\n",
      "         -3.2730]], grad_fn=<CloneBackward0>), hidden_states=None, attentions=None)\n",
      "torch.Size([1, 65])\n",
      "torch.Size([1, 65]) torch.Size([1, 65])\n"
     ]
    }
   ],
   "source": [
    "from transformers import AutoTokenizer, AutoModelForQuestionAnswering\n",
    "\n",
    "model_checkpoint = \"distilbert-base-cased-distilled-squad\"\n",
    "tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)\n",
    "model = AutoModelForQuestionAnswering.from_pretrained(model_checkpoint)\n",
    "\n",
    "context = \"\"\"\n",
    "Transformers is backed by the three most popular deep learning libraries — Jax, PyTorch, and TensorFlow — with a seamless integration between them. It's straightforward to train your models with one before loading them for inference with the other.\n",
    "\"\"\"\n",
    "question = \"Which deep learning libraries back Transformers?\"\n",
    "\n",
    "inputs = tokenizer(question, context, return_tensors=\"pt\")\n",
    "print(\"inputs\", inputs)\n",
    "outputs = model(**inputs)\n",
    "\n",
    "print(\"outputs\", outputs)\n",
    "print(inputs[\"input_ids\"].shape)\n",
    "\n",
    "start_logits = outputs.start_logits\n",
    "end_logits = outputs.end_logits\n",
    "print(start_logits.shape, end_logits.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[None,\n",
       " 0,\n",
       " 0,\n",
       " 0,\n",
       " 0,\n",
       " 0,\n",
       " 0,\n",
       " 0,\n",
       " None,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " None]"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "inputs.sequence_ids()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "sequence_ids [None, 0, 0, 0, 0, 0, 0, 0, None, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, None]\n",
      "tensor([[False,  True,  True,  True,  True,  True,  True,  True,  True, False,\n",
      "         False, False, False, False, False, False, False, False, False, False,\n",
      "         False, False, False, False, False, False, False, False, False, False,\n",
      "         False, False, False, False, False, False, False, False, False, False,\n",
      "         False, False, False, False, False, False, False, False, False, False,\n",
      "         False, False, False, False, False, False, False, False, False, False,\n",
      "         False, False, False, False,  True]])\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "\n",
    "sequence_ids = inputs.sequence_ids()\n",
    "print(\"sequence_ids\", sequence_ids)\n",
    "\n",
    "mask = [i != 1 for i in sequence_ids]\n",
    "mask[0] = False # Unmask the [CLS] token\n",
    "mask = torch.tensor(mask)[None]\n",
    "print(mask)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "start_logits tensor([[-3.9582e+00, -1.0000e+04, -1.0000e+04, -1.0000e+04, -1.0000e+04,\n",
      "         -1.0000e+04, -1.0000e+04, -1.0000e+04, -1.0000e+04,  1.1040e+00,\n",
      "         -3.9652e+00, -1.5413e+00, -2.2242e+00,  3.1515e+00,  6.2945e+00,\n",
      "         -4.7158e-01, -1.4832e+00, -5.0673e-01, -4.3221e+00, -1.6551e+00,\n",
      "          2.5779e+00,  1.0904e+01, -6.5445e-01,  2.6956e+00, -1.1208e+00,\n",
      "         -1.6860e+00, -3.7357e+00, -1.6676e+00, -1.5421e+00, -1.8649e+00,\n",
      "          2.0896e+00, -1.2079e+00, -7.8900e-01,  2.1465e-02, -1.3682e+00,\n",
      "         -3.5892e+00, -4.3107e+00, -3.8289e+00, -7.1438e+00, -5.9742e+00,\n",
      "         -3.7412e+00, -5.6779e+00, -4.2294e+00, -4.4258e+00, -2.2509e+00,\n",
      "         -6.1912e+00, -7.2860e+00, -3.6947e+00, -6.6102e+00, -3.8975e+00,\n",
      "         -3.4443e+00, -2.6780e+00, -7.3615e+00, -4.1177e+00, -6.7804e+00,\n",
      "         -4.3929e+00, -6.6828e+00, -7.4341e+00, -5.9426e+00, -6.6557e+00,\n",
      "         -8.2156e+00, -6.9574e+00, -6.2020e+00, -6.1046e+00, -1.0000e+04]],\n",
      "       grad_fn=<IndexPutBackward0>)\n",
      "end_logits tensor([[-1.7854e+00, -1.0000e+04, -1.0000e+04, -1.0000e+04, -1.0000e+04,\n",
      "         -1.0000e+04, -1.0000e+04, -1.0000e+04, -1.0000e+04, -7.0214e-01,\n",
      "         -6.1406e+00, -4.3293e+00, -5.8735e+00, -4.2517e+00,  4.9747e+00,\n",
      "         -3.4800e+00,  3.3870e-02, -3.4037e+00, -1.4726e+00,  2.4518e+00,\n",
      "         -8.0678e-01,  2.2278e+00,  7.1256e-01, -5.0411e-01,  2.5871e-01,\n",
      "         -3.8652e-01, -6.5142e-01,  4.5269e+00,  2.0128e+00,  7.2273e-01,\n",
      "          2.0128e+00,  2.6679e+00,  1.7589e+00,  1.1456e+01,  7.0150e+00,\n",
      "         -4.5284e+00, -6.1090e+00, -5.9652e+00, -6.3215e+00, -4.4237e+00,\n",
      "         -2.4921e+00, -3.5017e+00,  2.8051e+00,  4.0600e-01, -5.7715e+00,\n",
      "         -6.6872e+00, -7.4680e+00, -4.7417e+00, -8.0210e+00, -4.9343e+00,\n",
      "         -4.8170e+00, -1.4989e+00, -7.1752e+00, -1.5023e+00, -6.4373e+00,\n",
      "         -5.4378e+00, -3.4248e+00, -7.3257e+00, -7.2739e+00, -3.7352e+00,\n",
      "         -6.9190e+00, -6.4329e+00, -1.2230e+00, -7.7690e-01, -1.0000e+04]],\n",
      "       grad_fn=<IndexPutBackward0>)\n",
      "start_probabilities tensor([3.4708e-07, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,\n",
      "        0.0000e+00, 0.0000e+00, 0.0000e+00, 5.4816e-05, 3.4465e-07, 3.8909e-06,\n",
      "        1.9656e-06, 4.2475e-04, 9.8426e-03, 1.1341e-05, 4.1240e-06, 1.0949e-05,\n",
      "        2.4119e-07, 3.4726e-06, 2.3933e-04, 9.8891e-01, 9.4453e-06, 2.6923e-04,\n",
      "        5.9250e-06, 3.3669e-06, 4.3357e-07, 3.4293e-06, 3.8880e-06, 2.8153e-06,\n",
      "        1.4688e-04, 5.4307e-06, 8.2562e-06, 1.8568e-05, 4.6263e-06, 5.0198e-07,\n",
      "        2.4396e-07, 3.9498e-07, 1.4353e-08, 4.6224e-08, 4.3120e-07, 6.2166e-08,\n",
      "        2.6464e-07, 2.1743e-07, 1.9137e-06, 3.7209e-08, 1.2450e-08, 4.5169e-07,\n",
      "        2.4472e-08, 3.6878e-07, 5.8021e-07, 1.2485e-06, 1.1545e-08, 2.9591e-07,\n",
      "        2.0643e-08, 2.2472e-07, 2.2759e-08, 1.0737e-08, 4.7710e-08, 2.3382e-08,\n",
      "        4.9143e-09, 1.7293e-08, 3.6808e-08, 4.0573e-08, 0.0000e+00],\n",
      "       grad_fn=<SelectBackward0>)\n",
      "end_probabilities tensor([1.7483e-06, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,\n",
      "        0.0000e+00, 0.0000e+00, 0.0000e+00, 5.1650e-06, 2.2449e-08, 1.3734e-07,\n",
      "        2.9322e-08, 1.4843e-07, 1.5083e-03, 3.2113e-07, 1.0782e-05, 3.4657e-07,\n",
      "        2.3904e-06, 1.2101e-04, 4.6519e-06, 9.6727e-05, 2.1255e-05, 6.2962e-06,\n",
      "        1.3501e-05, 7.0818e-06, 5.4338e-06, 9.6382e-04, 7.8008e-05, 2.1473e-05,\n",
      "        7.8012e-05, 1.5019e-04, 6.0520e-05, 9.8504e-01, 1.1604e-02, 1.1255e-07,\n",
      "        2.3169e-08, 2.6751e-08, 1.8733e-08, 1.2497e-07, 8.6238e-07, 3.1422e-07,\n",
      "        1.7229e-04, 1.5643e-05, 3.2471e-08, 1.2995e-08, 5.9524e-09, 9.0934e-08,\n",
      "        3.4239e-09, 7.5001e-08, 8.4338e-08, 2.3283e-06, 7.9772e-09, 2.3204e-06,\n",
      "        1.6685e-08, 4.5334e-08, 3.3935e-07, 6.8630e-09, 7.2279e-09, 2.4879e-07,\n",
      "        1.0307e-08, 1.6759e-08, 3.0682e-06, 4.7930e-06, 0.0000e+00],\n",
      "       grad_fn=<SelectBackward0>)\n"
     ]
    }
   ],
   "source": [
    "start_logits[mask] = -10000\n",
    "end_logits[mask] = -10000\n",
    "\n",
    "print(\"start_logits\", start_logits)\n",
    "print(\"end_logits\", end_logits)\n",
    "\n",
    "start_probabilities = torch.nn.functional.softmax(start_logits, dim=-1)[0]\n",
    "end_probabilities = torch.nn.functional.softmax(end_logits, dim=-1)[0]\n",
    "print(\"start_probabilities\", start_probabilities)\n",
    "print(\"end_probabilities\", end_probabilities)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([65, 65]) tensor([[6.0679e-13, 0.0000e+00, 0.0000e+00,  ..., 1.0649e-12, 1.6636e-12,\n",
      "         0.0000e+00],\n",
      "        [0.0000e+00, 0.0000e+00, 0.0000e+00,  ..., 0.0000e+00, 0.0000e+00,\n",
      "         0.0000e+00],\n",
      "        [0.0000e+00, 0.0000e+00, 0.0000e+00,  ..., 0.0000e+00, 0.0000e+00,\n",
      "         0.0000e+00],\n",
      "        ...,\n",
      "        [0.0000e+00, 0.0000e+00, 0.0000e+00,  ..., 1.1293e-13, 1.7642e-13,\n",
      "         0.0000e+00],\n",
      "        [0.0000e+00, 0.0000e+00, 0.0000e+00,  ..., 0.0000e+00, 1.9447e-13,\n",
      "         0.0000e+00],\n",
      "        [0.0000e+00, 0.0000e+00, 0.0000e+00,  ..., 0.0000e+00, 0.0000e+00,\n",
      "         0.0000e+00]], grad_fn=<TriuBackward0>)\n"
     ]
    }
   ],
   "source": [
    "scores = start_probabilities[:, None] * end_probabilities[None, :]\n",
    "scores = torch.triu(scores)\n",
    "print(scores.shape, scores)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[3.4708e-07],\n",
       "        [0.0000e+00],\n",
       "        [0.0000e+00],\n",
       "        [0.0000e+00],\n",
       "        [0.0000e+00],\n",
       "        [0.0000e+00],\n",
       "        [0.0000e+00],\n",
       "        [0.0000e+00],\n",
       "        [0.0000e+00],\n",
       "        [5.4816e-05],\n",
       "        [3.4465e-07],\n",
       "        [3.8909e-06],\n",
       "        [1.9656e-06],\n",
       "        [4.2475e-04],\n",
       "        [9.8426e-03],\n",
       "        [1.1341e-05],\n",
       "        [4.1240e-06],\n",
       "        [1.0949e-05],\n",
       "        [2.4119e-07],\n",
       "        [3.4726e-06],\n",
       "        [2.3933e-04],\n",
       "        [9.8891e-01],\n",
       "        [9.4453e-06],\n",
       "        [2.6923e-04],\n",
       "        [5.9250e-06],\n",
       "        [3.3669e-06],\n",
       "        [4.3357e-07],\n",
       "        [3.4293e-06],\n",
       "        [3.8880e-06],\n",
       "        [2.8153e-06],\n",
       "        [1.4688e-04],\n",
       "        [5.4307e-06],\n",
       "        [8.2562e-06],\n",
       "        [1.8568e-05],\n",
       "        [4.6263e-06],\n",
       "        [5.0198e-07],\n",
       "        [2.4396e-07],\n",
       "        [3.9498e-07],\n",
       "        [1.4353e-08],\n",
       "        [4.6224e-08],\n",
       "        [4.3120e-07],\n",
       "        [6.2166e-08],\n",
       "        [2.6464e-07],\n",
       "        [2.1743e-07],\n",
       "        [1.9137e-06],\n",
       "        [3.7209e-08],\n",
       "        [1.2450e-08],\n",
       "        [4.5169e-07],\n",
       "        [2.4472e-08],\n",
       "        [3.6878e-07],\n",
       "        [5.8021e-07],\n",
       "        [1.2485e-06],\n",
       "        [1.1545e-08],\n",
       "        [2.9591e-07],\n",
       "        [2.0643e-08],\n",
       "        [2.2472e-07],\n",
       "        [2.2759e-08],\n",
       "        [1.0737e-08],\n",
       "        [4.7710e-08],\n",
       "        [2.3382e-08],\n",
       "        [4.9143e-09],\n",
       "        [1.7293e-08],\n",
       "        [3.6808e-08],\n",
       "        [4.0573e-08],\n",
       "        [0.0000e+00]], grad_fn=<UnsqueezeBackward0>)"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "start_probabilities[:, None]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[1.7483e-06, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,\n",
       "         0.0000e+00, 0.0000e+00, 0.0000e+00, 5.1650e-06, 2.2449e-08, 1.3734e-07,\n",
       "         2.9322e-08, 1.4843e-07, 1.5083e-03, 3.2113e-07, 1.0782e-05, 3.4657e-07,\n",
       "         2.3904e-06, 1.2101e-04, 4.6519e-06, 9.6727e-05, 2.1255e-05, 6.2962e-06,\n",
       "         1.3501e-05, 7.0818e-06, 5.4338e-06, 9.6382e-04, 7.8008e-05, 2.1473e-05,\n",
       "         7.8012e-05, 1.5019e-04, 6.0520e-05, 9.8504e-01, 1.1604e-02, 1.1255e-07,\n",
       "         2.3169e-08, 2.6751e-08, 1.8733e-08, 1.2497e-07, 8.6238e-07, 3.1422e-07,\n",
       "         1.7229e-04, 1.5643e-05, 3.2471e-08, 1.2995e-08, 5.9524e-09, 9.0934e-08,\n",
       "         3.4239e-09, 7.5001e-08, 8.4338e-08, 2.3283e-06, 7.9772e-09, 2.3204e-06,\n",
       "         1.6685e-08, 4.5334e-08, 3.3935e-07, 6.8630e-09, 7.2279e-09, 2.4879e-07,\n",
       "         1.0307e-08, 1.6759e-08, 3.0682e-06, 4.7930e-06, 0.0000e+00]],\n",
       "       grad_fn=<SliceBackward0>)"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "end_probabilities[None, :]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "max_index 1398\n",
      "inputs_with_offsets {'input_ids': [101, 5979, 1996, 3776, 9818, 1171, 25267, 136, 102, 25267, 1110, 5534, 1118, 1103, 1210, 1211, 1927, 1996, 3776, 9818, 783, 13612, 117, 153, 1183, 1942, 1766, 1732, 117, 1105, 5157, 21484, 2271, 6737, 783, 1114, 170, 2343, 1306, 2008, 9111, 1206, 1172, 119, 1135, 112, 188, 21546, 1106, 2669, 1240, 3584, 1114, 1141, 1196, 10745, 1172, 1111, 1107, 16792, 1114, 1103, 1168, 119, 102], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'offset_mapping': [(0, 0), (0, 5), (6, 10), (11, 19), (20, 29), (30, 34), (35, 47), (47, 48), (0, 0), (1, 13), (14, 16), (17, 23), (24, 26), (27, 30), (31, 36), (37, 41), (42, 49), (50, 54), (55, 63), (64, 73), (74, 75), (76, 79), (79, 80), (81, 82), (82, 83), (83, 84), (84, 86), (86, 88), (88, 89), (90, 93), (94, 97), (97, 100), (100, 101), (101, 104), (105, 106), (107, 111), (112, 113), (114, 117), (117, 118), (118, 122), (123, 134), (135, 142), (143, 147), (147, 148), (149, 151), (151, 152), (152, 153), (154, 169), (170, 172), (173, 178), (179, 183), (184, 190), (191, 195), (196, 199), (200, 206), (207, 214), (215, 219), (220, 223), (224, 226), (226, 233), (234, 238), (239, 242), (243, 248), (248, 249), (0, 0)]}\n",
      "offsets [(0, 0), (0, 5), (6, 10), (11, 19), (20, 29), (30, 34), (35, 47), (47, 48), (0, 0), (1, 13), (14, 16), (17, 23), (24, 26), (27, 30), (31, 36), (37, 41), (42, 49), (50, 54), (55, 63), (64, 73), (74, 75), (76, 79), (79, 80), (81, 82), (82, 83), (83, 84), (84, 86), (86, 88), (88, 89), (90, 93), (94, 97), (97, 100), (100, 101), (101, 104), (105, 106), (107, 111), (112, 113), (114, 117), (117, 118), (118, 122), (123, 134), (135, 142), (143, 147), (147, 148), (149, 151), (151, 152), (152, 153), (154, 169), (170, 172), (173, 178), (179, 183), (184, 190), (191, 195), (196, 199), (200, 206), (207, 214), (215, 219), (220, 223), (224, 226), (226, 233), (234, 238), (239, 242), (243, 248), (248, 249), (0, 0)]\n",
      "{'answer': 'Jax, PyTorch, and TensorFlow', 'start': 76, 'end': 104, 'score': 0.9741137027740479}\n"
     ]
    }
   ],
   "source": [
    "max_index = scores.argmax().item()\n",
    "print(\"max_index\", max_index)\n",
    "start_index = max_index // scores.shape[1]\n",
    "end_index = max_index % scores.shape[1]\n",
    "\n",
    "inputs_with_offsets = tokenizer(question, context, return_offsets_mapping=True)\n",
    "print(\"inputs_with_offsets\", inputs_with_offsets)\n",
    "offsets = inputs_with_offsets[\"offset_mapping\"]\n",
    "print(\"offsets\", offsets)\n",
    "start, _ = offsets[start_index]\n",
    "_, end = offsets[end_index]\n",
    "\n",
    "result = {\n",
    "    \"answer\": context[start:end],\n",
    "    \"start\": start,\n",
    "    \"end\": end,\n",
    "    \"score\": float(scores[start_index, end_index]),\n",
    "}\n",
    "print(result)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "No model was supplied, defaulted to distilbert/distilbert-base-cased-distilled-squad and revision 626af31 (https://hf-mirror.com/distilbert/distilbert-base-cased-distilled-squad).\n",
      "Using a pipeline without specifying a model name and revision in production is not recommended.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'score': 0.9697462320327759, 'start': 1884, 'end': 1911, 'answer': 'Jax, PyTorch and TensorFlow'}\n"
     ]
    }
   ],
   "source": [
    "from transformers import pipeline\n",
    "\n",
    "# Default checkpoint for \"question-answering\" (see stderr output above);\n",
    "# the pipeline handles contexts longer than the model's max length internally.\n",
    "question_answerer = pipeline(\"question-answering\")\n",
    "\n",
    "# A context deliberately longer than the model's maximum sequence length.\n",
    "long_context = \"\"\"\n",
    "Transformers: State of the Art NLP\n",
    "\n",
    "Transformers provides thousands of pretrained models to perform tasks on texts such as classification, information extraction, question answering, summarization, translation, text generation and more in over 100 languages. Its aim is to make cutting-edge NLP easier to use for everyone.\n",
    "\n",
    "Transformers provides APIs to quickly download and use those pretrained models on a given text, fine-tune them on your own datasets and then share them with the community on our model hub. At the same time, each python module defining an architecture is fully standalone and can be modified to enable quick research experiments.\n",
    "\n",
    "Why should I use transformers?\n",
    "\n",
    "1. Easy-to-use state-of-the-art models:\n",
    "  - High performance on NLU and NLG tasks.\n",
    "  - Low barrier to entry for educators and practitioners.\n",
    "  - Few user-facing abstractions with just three classes to learn.\n",
    "  - A unified API for using all our pretrained models.\n",
    "  - Lower compute costs, smaller carbon footprint:\n",
    "\n",
    "2. Researchers can share trained models instead of always retraining.\n",
    "  - Practitioners can reduce compute time and production costs.\n",
    "  - Dozens of architectures with over 10,000 pretrained models, some in more than 100 languages.\n",
    "\n",
    "3. Choose the right framework for every part of a model's lifetime:\n",
    "  - Train state-of-the-art models in 3 lines of code.\n",
    "  - Move a single model between TF2.0/PyTorch frameworks at will.\n",
    "  - Seamlessly pick the right framework for training, evaluation and production.\n",
    "\n",
    "4. Easily customize a model or an example to your needs:\n",
    "  - We provide examples for each architecture to reproduce the results published by its original authors.\n",
    "  - Model internals are exposed as consistently as possible.\n",
    "  - Model files can be used independently of the library for quick experiments.\n",
    "\n",
    "Transformers is backed by the three most popular deep learning libraries — Jax, PyTorch and TensorFlow — with a seamless integration between them. It's straightforward to train your models with one before loading them for inference with the other.\n",
    "\"\"\"\n",
    "question = \"Which deep learning libraries back Transformers?\"\n",
    "# The pipeline returns character-level start/end positions into long_context.\n",
    "results = question_answerer(question=question, context=long_context)\n",
    "print(results)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[CLS] This sentence is not [SEP]\n",
      "[CLS] is not too long [SEP]\n",
      "[CLS] too long but we [SEP]\n",
      "[CLS] but we are going [SEP]\n",
      "[CLS] are going to split [SEP]\n",
      "[CLS] to split it anyway [SEP]\n",
      "[CLS] it anyway. [SEP]\n"
     ]
    }
   ],
   "source": [
    "# Split one sentence into overlapping chunks: at most max_length=6 tokens per\n",
    "# chunk (special tokens included), with stride=2 tokens of overlap between\n",
    "# consecutive chunks — visible in the decoded output above.\n",
    "sentence = \"This sentence is not too long but we are going to split it anyway.\"\n",
    "inputs = tokenizer(\n",
    "    sentence, truncation=True, return_overflowing_tokens=True, max_length=6, stride=2\n",
    ")\n",
    "\n",
    "# input_ids is now a list of chunks; decode each to see the overlap.\n",
    "for ids in inputs[\"input_ids\"]:\n",
    "    print(tokenizer.decode(ids))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "dict_keys(['input_ids', 'attention_mask', 'overflow_to_sample_mapping'])\n",
      "[0, 0, 0, 0, 0, 0, 0]\n",
      "[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1]\n"
     ]
    }
   ],
   "source": [
    "# With return_overflowing_tokens=True the tokenizer also returns\n",
    "# overflow_to_sample_mapping: for each produced chunk, the index of the\n",
    "# original input sentence it came from.\n",
    "sentence = \"This sentence is not too long but we are going to split it anyway.\"\n",
    "inputs = tokenizer(\n",
    "    sentence, truncation=True, return_overflowing_tokens=True, max_length=6, stride=2\n",
    ")\n",
    "print(inputs.keys())\n",
    "# Single sentence: every chunk maps to sample 0.\n",
    "print(inputs[\"overflow_to_sample_mapping\"])\n",
    "\n",
    "# With a batch of two sentences, chunks map to 0 or 1 depending on their origin.\n",
    "sentences = [\n",
    "    \"This sentence is not too long but we are going to split it anyway.\",\n",
    "    \"This sentence is shorter but will still get split.\",\n",
    "]\n",
    "inputs = tokenizer(\n",
    "    sentences, truncation=True, return_overflowing_tokens=True, max_length=6, stride=2\n",
    ")\n",
    "print(inputs[\"overflow_to_sample_mapping\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([2, 384])\n"
     ]
    }
   ],
   "source": [
    "# Tokenize question + long context as a pair. truncation=\"only_second\" only\n",
    "# truncates the context (never the question); stride=128 keeps 128 overlapping\n",
    "# context tokens between consecutive chunks.\n",
    "inputs = tokenizer(\n",
    "    question,\n",
    "    long_context,\n",
    "    stride=128,\n",
    "    max_length=384,\n",
    "    padding=\"longest\",\n",
    "    truncation=\"only_second\",\n",
    "    return_overflowing_tokens=True,\n",
    "    return_offsets_mapping=True,\n",
    ")\n",
    "\n",
    "# Pop the extra keys the model does not accept; keep offsets for later use.\n",
    "_ = inputs.pop(\"overflow_to_sample_mapping\")\n",
    "offsets = inputs.pop(\"offset_mapping\")\n",
    "\n",
    "# Two chunks of 384 tokens each — the context overflowed once.\n",
    "inputs = inputs.convert_to_tensors(\"pt\")\n",
    "print(inputs[\"input_ids\"].shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['[CLS]',\n",
       " 'Which',\n",
       " 'deep',\n",
       " 'learning',\n",
       " 'libraries',\n",
       " 'back',\n",
       " 'Transformers',\n",
       " '?',\n",
       " '[SEP]',\n",
       " 'architecture',\n",
       " '##s',\n",
       " 'with',\n",
       " 'over',\n",
       " '10',\n",
       " ',',\n",
       " '000',\n",
       " 'pre',\n",
       " '##tra',\n",
       " '##ined',\n",
       " 'models',\n",
       " ',',\n",
       " 'some',\n",
       " 'in',\n",
       " 'more',\n",
       " 'than',\n",
       " '100',\n",
       " 'languages',\n",
       " '.',\n",
       " '3',\n",
       " '.',\n",
       " 'Cho',\n",
       " '##ose',\n",
       " 'the',\n",
       " 'right',\n",
       " 'framework',\n",
       " 'for',\n",
       " 'every',\n",
       " 'part',\n",
       " 'of',\n",
       " 'a',\n",
       " 'model',\n",
       " \"'\",\n",
       " 's',\n",
       " 'lifetime',\n",
       " ':',\n",
       " '-',\n",
       " 'Train',\n",
       " 'state',\n",
       " '-',\n",
       " 'of',\n",
       " '-',\n",
       " 'the',\n",
       " '-',\n",
       " 'art',\n",
       " 'models',\n",
       " 'in',\n",
       " '3',\n",
       " 'lines',\n",
       " 'of',\n",
       " 'code',\n",
       " '.',\n",
       " '-',\n",
       " 'Move',\n",
       " 'a',\n",
       " 'single',\n",
       " 'model',\n",
       " 'between',\n",
       " 'T',\n",
       " '##F',\n",
       " '##2',\n",
       " '.',\n",
       " '0',\n",
       " '/',\n",
       " 'P',\n",
       " '##y',\n",
       " '##T',\n",
       " '##or',\n",
       " '##ch',\n",
       " 'framework',\n",
       " '##s',\n",
       " 'at',\n",
       " 'will',\n",
       " '.',\n",
       " '-',\n",
       " 'Sea',\n",
       " '##m',\n",
       " '##lessly',\n",
       " 'pick',\n",
       " 'the',\n",
       " 'right',\n",
       " 'framework',\n",
       " 'for',\n",
       " 'training',\n",
       " ',',\n",
       " 'evaluation',\n",
       " 'and',\n",
       " 'production',\n",
       " '.',\n",
       " '4',\n",
       " '.',\n",
       " 'E',\n",
       " '##asily',\n",
       " 'custom',\n",
       " '##ize',\n",
       " 'a',\n",
       " 'model',\n",
       " 'or',\n",
       " 'an',\n",
       " 'example',\n",
       " 'to',\n",
       " 'your',\n",
       " 'needs',\n",
       " ':',\n",
       " '-',\n",
       " 'We',\n",
       " 'provide',\n",
       " 'examples',\n",
       " 'for',\n",
       " 'each',\n",
       " 'architecture',\n",
       " 'to',\n",
       " 'reproduce',\n",
       " 'the',\n",
       " 'results',\n",
       " 'published',\n",
       " 'by',\n",
       " 'its',\n",
       " 'original',\n",
       " 'authors',\n",
       " '.',\n",
       " '-',\n",
       " 'Model',\n",
       " 'internal',\n",
       " '##s',\n",
       " 'are',\n",
       " 'exposed',\n",
       " 'as',\n",
       " 'consistently',\n",
       " 'as',\n",
       " 'possible',\n",
       " '.',\n",
       " '-',\n",
       " 'Model',\n",
       " 'files',\n",
       " 'can',\n",
       " 'be',\n",
       " 'used',\n",
       " 'independently',\n",
       " 'of',\n",
       " 'the',\n",
       " 'library',\n",
       " 'for',\n",
       " 'quick',\n",
       " 'experiments',\n",
       " '.',\n",
       " 'Transformers',\n",
       " 'is',\n",
       " 'backed',\n",
       " 'by',\n",
       " 'the',\n",
       " 'three',\n",
       " 'most',\n",
       " 'popular',\n",
       " 'deep',\n",
       " 'learning',\n",
       " 'libraries',\n",
       " '—',\n",
       " 'Jax',\n",
       " ',',\n",
       " 'P',\n",
       " '##y',\n",
       " '##T',\n",
       " '##or',\n",
       " '##ch',\n",
       " 'and',\n",
       " 'Ten',\n",
       " '##sor',\n",
       " '##F',\n",
       " '##low',\n",
       " '—',\n",
       " 'with',\n",
       " 'a',\n",
       " 'sea',\n",
       " '##m',\n",
       " '##less',\n",
       " 'integration',\n",
       " 'between',\n",
       " 'them',\n",
       " '.',\n",
       " 'It',\n",
       " \"'\",\n",
       " 's',\n",
       " 'straightforward',\n",
       " 'to',\n",
       " 'train',\n",
       " 'your',\n",
       " 'models',\n",
       " 'with',\n",
       " 'one',\n",
       " 'before',\n",
       " 'loading',\n",
       " 'them',\n",
       " 'for',\n",
       " 'in',\n",
       " '##ference',\n",
       " 'with',\n",
       " 'the',\n",
       " 'other',\n",
       " '.',\n",
       " '[SEP]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]',\n",
       " '[PAD]']"
      ]
     },
     "execution_count": 31,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Decode each id of the second chunk one at a time to list its tokens;\n",
    "# note the [PAD] tail produced by padding=\"longest\".\n",
    "[tokenizer.decode(ids) for ids in inputs[\"input_ids\"][1]]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([  101,  5979,  1996,  3776,  9818,  1171, 25267,   136,   102, 25267,\n",
       "          131,  1426,  1104,  1103,  2051, 21239,  2101, 25267,  2790,  4674,\n",
       "         1104,  3073,  4487,  9044,  3584,  1106,  3870,  8249,  1113,  6685,\n",
       "         1216,  1112,  5393,   117,  1869, 16026,   117,  2304, 10937,   117,\n",
       "         7584,  7317,  2734,   117,  5179,   117,  3087,  3964,  1105,  1167,\n",
       "         1107,  1166,  1620,  3483,   119,  2098,  6457,  1110,  1106,  1294,\n",
       "         5910,   118,  2652, 21239,  2101,  5477,  1106,  1329,  1111,  2490,\n",
       "          119, 25267,  2790, 20480,  1116,  1106,  1976,  9133,  1105,  1329,\n",
       "         1343,  3073,  4487,  9044,  3584,  1113,   170,  1549,  3087,   117,\n",
       "         2503,   118,  9253,  1172,  1113,  1240,  1319,  2233, 27948,  1105,\n",
       "         1173,  2934,  1172,  1114,  1103,  1661,  1113,  1412,  2235, 10960,\n",
       "          119,  1335,  1103,  1269,  1159,   117,  1296,   185, 25669,  8613,\n",
       "        13196, 13682,  1126,  4220,  1110,  3106,  2484, 20717,  1673,  1105,\n",
       "         1169,  1129,  5847,  1106,  9396,  3613,  1844,  7857,   119,  2009,\n",
       "         1431,   146,  1329, 11303,  1468,   136,   122,   119, 12167,   118,\n",
       "         1106,   118,  1329,  1352,   118,  1104,   118,  1103,   118,  1893,\n",
       "         3584,   131,   118,  1693,  2099,  1113, 21239,  2591,  1105, 21239,\n",
       "         2349,  8249,   119,   118,  8274,  9391,  1106,  3990,  1111, 24937,\n",
       "         1105, 16681,   119,   118, 17751,  4795,   118,  4749, 11108,  5266,\n",
       "         1114,  1198,  1210,  3553,  1106,  3858,   119,   118,   138, 13943,\n",
       "        20480,  1111,  1606,  1155,  1412,  3073,  4487,  9044,  3584,   119,\n",
       "          118,  5738,  3254, 22662,  4692,   117,  2964,  6302,  2555, 10988,\n",
       "          131,   123,   119, 26982,  1169,  2934,  3972,  3584,  1939,  1104,\n",
       "         1579,  1231,  4487, 16534,   119,   118,   153, 19366,  3121,  2116,\n",
       "         1468,  1169,  4851,  3254, 22662,  1159,  1105,  1707,  4692,   119,\n",
       "          118,  2091, 10947,  1116,  1104,  4220,  1116,  1114,  1166,  1275,\n",
       "          117,  1288,  3073,  4487,  9044,  3584,   117,  1199,  1107,  1167,\n",
       "         1190,  1620,  3483,   119,   124,   119, 22964,  6787,  1103,  1268,\n",
       "         8297,  1111,  1451,  1226,  1104,   170,  2235,   112,   188,  7218,\n",
       "          131,   118,  9791,  1352,   118,  1104,   118,  1103,   118,  1893,\n",
       "         3584,  1107,   124,  2442,  1104,  3463,   119,   118, 15729,   170,\n",
       "         1423,  2235,  1206,   157,  2271,  1477,   119,   121,   120,   153,\n",
       "         1183,  1942,  1766,  1732,  8297,  1116,  1120,  1209,   119,   118,\n",
       "         3017,  1306,  8709,  3368,  1103,  1268,  8297,  1111,  2013,   117,\n",
       "        10540,  1105,  1707,   119,   125,   119,   142, 20158,  8156,  3708,\n",
       "          170,  2235,  1137,  1126,  1859,  1106,  1240,  2993,   131,   118,\n",
       "         1284,  2194,  5136,  1111,  1296,  4220,  1106, 23577,  1103,  2686,\n",
       "         1502,  1118,  1157,  1560,  5752,   119,   118,  6747,  4422,  1116,\n",
       "         1132,  5490,  1112,   102])"
      ]
     },
     "execution_count": 39,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Raw input ids of the first chunk (question + first slice of the context).\n",
    "inputs[\"input_ids\"][0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([  101,  5979,  1996,  3776,  9818,  1171, 25267,   136,   102,  4220,\n",
       "         1116,  1114,  1166,  1275,   117,  1288,  3073,  4487,  9044,  3584,\n",
       "          117,  1199,  1107,  1167,  1190,  1620,  3483,   119,   124,   119,\n",
       "        22964,  6787,  1103,  1268,  8297,  1111,  1451,  1226,  1104,   170,\n",
       "         2235,   112,   188,  7218,   131,   118,  9791,  1352,   118,  1104,\n",
       "          118,  1103,   118,  1893,  3584,  1107,   124,  2442,  1104,  3463,\n",
       "          119,   118, 15729,   170,  1423,  2235,  1206,   157,  2271,  1477,\n",
       "          119,   121,   120,   153,  1183,  1942,  1766,  1732,  8297,  1116,\n",
       "         1120,  1209,   119,   118,  3017,  1306,  8709,  3368,  1103,  1268,\n",
       "         8297,  1111,  2013,   117, 10540,  1105,  1707,   119,   125,   119,\n",
       "          142, 20158,  8156,  3708,   170,  2235,  1137,  1126,  1859,  1106,\n",
       "         1240,  2993,   131,   118,  1284,  2194,  5136,  1111,  1296,  4220,\n",
       "         1106, 23577,  1103,  2686,  1502,  1118,  1157,  1560,  5752,   119,\n",
       "          118,  6747,  4422,  1116,  1132,  5490,  1112, 10887,  1112,  1936,\n",
       "          119,   118,  6747,  7004,  1169,  1129,  1215,  8942,  1104,  1103,\n",
       "         3340,  1111,  3613,  7857,   119, 25267,  1110,  5534,  1118,  1103,\n",
       "         1210,  1211,  1927,  1996,  3776,  9818,   783, 13612,   117,   153,\n",
       "         1183,  1942,  1766,  1732,  1105,  5157, 21484,  2271,  6737,   783,\n",
       "         1114,   170,  2343,  1306,  2008,  9111,  1206,  1172,   119,  1135,\n",
       "          112,   188, 21546,  1106,  2669,  1240,  3584,  1114,  1141,  1196,\n",
       "        10745,  1172,  1111,  1107, 16792,  1114,  1103,  1168,   119,   102,\n",
       "            0,     0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
       "            0,     0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
       "            0,     0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
       "            0,     0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
       "            0,     0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
       "            0,     0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
       "            0,     0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
       "            0,     0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
       "            0,     0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
       "            0,     0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
       "            0,     0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
       "            0,     0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
       "            0,     0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
       "            0,     0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
       "            0,     0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
       "            0,     0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
       "            0,     0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
       "            0,     0,     0,     0])"
      ]
     },
     "execution_count": 40,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Raw input ids of the second chunk: question + remainder of the context,\n",
    "# zero-padded up to length 384.\n",
    "inputs[\"input_ids\"][1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([2, 384]) torch.Size([2, 384])\n"
     ]
    }
   ],
   "source": [
    "# Run both chunks through the QA model in one batch.\n",
    "# NOTE(review): `model` is loaded in an earlier cell not shown here —\n",
    "# presumably the checkpoint matching `tokenizer`; confirm against that cell.\n",
    "outputs = model(**inputs)\n",
    "\n",
    "# One start/end logit per token per chunk: shape (2 chunks, 384 tokens).\n",
    "start_logits = outputs.start_logits\n",
    "end_logits = outputs.end_logits\n",
    "print(start_logits.shape, end_logits.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "sequence_ids 384 [None, 0, 0, 0, 0, 0, 0, 0, None, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, None]\n",
      "torch.Size([2, 384]) tensor([False,  True,  True,  True,  True,  True,  True,  True,  True, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False,  True])\n",
      "torch.Size([2, 384]) tensor([False,  True,  True,  True,  True,  True,  True,  True,  True, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True])\n"
     ]
    }
   ],
   "source": [
    "# sequence_ids(): None for special tokens, 0 for question tokens, 1 for context\n",
    "# tokens (printed above; both chunks share the same question prefix).\n",
    "sequence_ids = inputs.sequence_ids()\n",
    "print(\"sequence_ids\", len(sequence_ids), sequence_ids)\n",
    "# Mask everything apart from the tokens of the context\n",
    "mask = [i != 1 for i in sequence_ids]\n",
    "# Unmask the [CLS] token\n",
    "mask[0] = False\n",
    "# Mask all the [PAD] tokens\n",
    "# [None] broadcasts the per-token mask over both chunks; OR with attention_mask==0\n",
    "# additionally masks the padding positions of the second chunk.\n",
    "mask = torch.logical_or(torch.tensor(mask)[None], (inputs[\"attention_mask\"] == 0))\n",
    "\n",
    "# In-place: a large negative logit drives the softmax probability of masked\n",
    "# positions to ~0, so answers can only start/end inside the context.\n",
    "start_logits[mask] = -10000\n",
    "end_logits[mask] = -10000\n",
    "\n",
    "start_probabilities = torch.nn.functional.softmax(start_logits, dim=-1)\n",
    "end_probabilities = torch.nn.functional.softmax(end_logits, dim=-1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([2, 384]) tensor([False,  True,  True,  True,  True,  True,  True,  True,  True, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False,  True])\n"
     ]
    }
   ],
   "source": [
    "# Inspect the mask for the first batch element: True marks masked positions —\n",
    "# the question tokens (sequence_id != 1) and, per the logical_or with\n",
    "# attention_mask == 0 above, any [PAD] positions. [CLS] (index 0) stays False.\n",
    "print(mask.shape, mask[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([2, 384]) tensor([False,  True,  True,  True,  True,  True,  True,  True,  True, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "        False, False, False, False, False, False, False, False, False, False,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
      "         True,  True,  True,  True])\n"
     ]
    }
   ],
   "source": [
    "# Same inspection for the second batch element: here the trailing run of True\n",
    "# values comes from padding (attention_mask == 0), since this sequence is\n",
    "# shorter than the padded length of 384.\n",
    "print(mask.shape, mask[1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'answer': '', 'start': 0, 'end': 0, 'score': 0.6493728756904602}\n",
      "{'answer': 'Jax, PyTorch and TensorFlow', 'start': 1884, 'end': 1911, 'score': 0.9697461128234863}\n"
     ]
    }
   ],
   "source": [
    "# For each sequence, pick the (start, end) token pair maximizing\n",
    "# P(start) * P(end), restricted to end >= start, then map the pair\n",
    "# back to character offsets in the original context.\n",
    "candidates = []\n",
    "for start_probs, end_probs in zip(start_probabilities, end_probabilities):\n",
    "    # Outer product: scores[i, j] = P(start = i) * P(end = j)\n",
    "    scores = start_probs[:, None] * end_probs[None, :]\n",
    "    # triu zeroes out pairs with end < start; argmax gives a flat (row-major) index\n",
    "    idx = torch.triu(scores).argmax().item()\n",
    "\n",
    "    # Unravel the flat index: row = idx // n_cols, col = idx % n_cols.\n",
    "    # scores happens to be square here, but shape[1] (the number of\n",
    "    # columns) is the correct divisor in general, not shape[0].\n",
    "    start_idx = idx // scores.shape[1]\n",
    "    end_idx = idx % scores.shape[1]\n",
    "    score = scores[start_idx, end_idx].item()\n",
    "    candidates.append((start_idx, end_idx, score))\n",
    "\n",
    "for candidate, offset in zip(candidates, offsets):\n",
    "    start_token, end_token, score = candidate\n",
    "    # Offsets map each token to its (start_char, end_char) span in the text\n",
    "    start_char, _ = offset[start_token]\n",
    "    _, end_char = offset[end_token]\n",
    "    answer = long_context[start_char:end_char]\n",
    "    result = {\"answer\": answer, \"start\": start_char, \"end\": end_char, \"score\": score}\n",
    "    print(result)\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
