{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": true
   },
   "source": [
    "| [09_dialogue/01_对话模型.ipynb](https://github.com/shibing624/nlp-tutorial/tree/main/09_dialogue/01_对话模型.ipynb)  | 基于transformers的Bert问答模型  |[Open In Colab](https://colab.research.google.com/github/shibing624/nlp-tutorial/blob/main/09_dialogue/01_对话模型.ipynb) |\n",
    "\n",
    "\n",
     "# 对话模型（基于Bert的问答模型）\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "使用transformers的Bert模型完成阅读理解任务。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "!pip install transformers"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "pycharm": {
     "is_executing": true
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "bert-base-chinese\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of the model checkpoint at bert-base-chinese were not used when initializing BertForQuestionAnswering: ['cls.predictions.transform.LayerNorm.bias', 'cls.predictions.transform.dense.bias', 'cls.predictions.decoder.weight', 'cls.seq_relationship.weight', 'cls.predictions.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.transform.LayerNorm.weight', 'cls.seq_relationship.bias']\n",
      "- This IS expected if you are initializing BertForQuestionAnswering from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing BertForQuestionAnswering from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n",
      "Some weights of BertForQuestionAnswering were not initialized from the model checkpoint at bert-base-chinese and are newly initialized: ['qa_outputs.bias', 'qa_outputs.weight']\n",
      "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Answer: '图像分类', score: 0.0008, start: 63, end: 67\n",
      "Answer: '可', score: 0.0007, start: 28, end: 29\n"
     ]
    }
   ],
   "source": [
     "# Extractive question answering (reading comprehension) using the\n",
     "# transformers `pipeline` API with the plain bert-base-chinese checkpoint.\n",
     "import os\n",
     "\n",
     "from transformers import pipeline\n",
     "# BertTokenizer is not used directly in this cell, but the \"Custom Predict\"\n",
     "# cell below relies on this import having been executed first.\n",
     "from transformers import AutoModelForQuestionAnswering, BertTokenizer\n",
     "# Work around \"duplicate OpenMP runtime\" crashes seen with some conda/macOS builds.\n",
     "os.environ[\"KMP_DUPLICATE_LIB_OK\"] = \"TRUE\"\n",
     "bert_model = 'bert-base-chinese'\n",
     "print(bert_model)\n",
     "# model = AutoModelForQuestionAnswering.from_pretrained(bert_model_dir)\n",
     "# tokenizer = BertTokenizer.from_pretrained(bert_model_dir)\n",
     "# Build the QA pipeline; passing the checkpoint name for both `model` and\n",
     "# `tokenizer` downloads the weights and the matching tokenizer together.\n",
     "nlp = pipeline(\"question-answering\",\n",
     "               model=bert_model,\n",
     "               tokenizer=bert_model,\n",
     "               device=-1,  # gpu device id; -1 means run on CPU\n",
     "               )\n",
     "context = r\"\"\"\n",
     "大家好，我是张亮，目前任职当当架构部架构师一职，也是高可用架构群的一员。我为大家提供了一份imagenet数据集，希望能够为图像分类任务做点贡献。\n",
     "\"\"\"\n",
     "\n",
     "# context = ' '.join(list(context))\n",
     "\n",
     "# NOTE(review): the checkpoint's qa_outputs head is newly initialized (see the\n",
     "# stderr warning above), so the scores are near zero and the extracted answers\n",
     "# are unreliable; a QA-finetuned checkpoint is needed for real predictions.\n",
     "result = nlp(question=\"张亮在哪里任职?\", context=context)\n",
     "print(\n",
     "    f\"Answer: '{result['answer']}', score: {round(result['score'], 4)}, start: {result['start']}, end: {result['end']}\")\n",
     "result = nlp(question=\"张亮为图像分类提供了什么数据集?\", context=context)\n",
     "print(\n",
     "    f\"Answer: '{result['answer']}', score: {round(result['score'], 4)}, start: {result['start']}, end: {result['end']}\")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "\n",
    "# Custom Predict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForQuestionAnswering: ['cls.predictions.transform.LayerNorm.bias', 'cls.predictions.transform.dense.bias', 'cls.predictions.decoder.weight', 'cls.seq_relationship.weight', 'cls.predictions.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.transform.LayerNorm.weight', 'cls.seq_relationship.bias']\n",
      "- This IS expected if you are initializing BertForQuestionAnswering from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing BertForQuestionAnswering from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n",
      "Some weights of BertForQuestionAnswering were not initialized from the model checkpoint at bert-base-uncased and are newly initialized: ['qa_outputs.bias', 'qa_outputs.weight']\n",
      "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "inputs:{'input_ids': tensor([[  101,  2129,  2116,  3653, 23654,  2098,  4275,  2024,  2800,  1999,\n",
      "         19081,  1029,   102, 19081,  1006,  3839,  2124,  2004,  1052, 22123,\n",
      "          2953,  2818,  1011, 19081,  1998,  1052, 22123,  2953,  2818,  1011,\n",
      "          3653, 23654,  2098,  1011, 14324,  1007,  3640,  2236,  1011,  3800,\n",
      "          4294,  2015,  1006, 14324,  1010, 14246,  2102,  1011,  1016,  1010,\n",
      "         23455,  1010, 28712,  2213,  1010,  4487, 16643, 23373,  1010, 28712,\n",
      "          7159,  1529,  1007,  2005,  3019,  2653,  4824,  1006, 17953,  2226,\n",
      "          1007,  1998,  3019,  2653,  4245,  1006, 17953,  2290,  1007,  2007,\n",
      "          2058,  3590,  1009,  3653, 23654,  2098,  4275,  1999,  2531,  1009,\n",
      "          4155,  1998,  2784,  6970, 25918,  8010,  2090, 23435, 12314,  1016,\n",
      "          1012,  1014,  1998,  1052, 22123,  2953,  2818,  1012,   102]]), 'token_type_ids': tensor([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])}\n",
      "QuestionAnsweringModelOutput(loss=None, start_logits=tensor([[-0.1809, -0.2877, -0.0165,  0.4002,  0.0336,  0.1665,  0.0559, -0.1454,\n",
      "          0.1015, -0.0070,  0.8802, -0.3729, -0.1835,  0.4728, -0.4547, -0.1775,\n",
      "         -0.4006, -0.3673, -0.1061,  0.0500, -0.3471,  0.3389, -0.1820,  0.4635,\n",
      "         -0.0337,  0.0204,  0.1292, -0.2840,  0.1941, -0.1104,  0.4291,  0.0289,\n",
      "         -0.0887, -0.2625, -0.2431,  0.0716, -0.2869, -0.3096,  0.0168, -0.1081,\n",
      "         -0.0607, -0.0331, -0.2991, -0.0309, -0.1091, -0.0538,  0.1684, -0.1696,\n",
      "         -0.2679, -0.0892,  0.0739, -0.0975,  0.1413, -0.2315, -0.1620,  0.2797,\n",
      "         -0.1329, -0.0229, -0.0884,  0.2812, -0.2193, -0.1055,  0.0775, -0.3384,\n",
      "          0.5046,  0.2235,  0.0127, -0.8437,  0.2458, -0.5693, -0.4947, -0.2014,\n",
      "          0.5231,  0.1126,  0.1408, -0.6467,  0.0618, -0.3228, -0.2658, -0.0526,\n",
      "         -0.1529,  0.1490,  0.1098,  0.2663,  0.0697,  0.0606, -0.0394, -0.1831,\n",
      "         -0.2320,  0.0039, -0.0256,  0.1474, -0.1038, -0.0377, -0.1779, -0.2964,\n",
      "         -0.1789,  0.0389, -0.0369, -0.1093,  0.3261,  0.5475, -0.0064,  0.1163,\n",
      "          0.2235, -0.2147,  0.2724,  0.1317,  0.1255]],\n",
      "       grad_fn=<CopyBackwards>), end_logits=tensor([[ 2.8367e-01,  2.5896e-01, -2.1378e-03,  3.7394e-01,  3.0050e-01,\n",
      "         -5.1480e-02, -1.0367e-01,  1.2521e-01, -1.0521e-01,  9.9725e-02,\n",
      "         -1.3603e-01,  3.6162e-01, -4.4476e-01, -5.2431e-02, -1.8647e-01,\n",
      "          2.8659e-01,  3.3774e-01,  2.1877e-01,  3.5400e-01, -1.9227e-01,\n",
      "         -2.4909e-01, -1.8762e-01,  5.8715e-02, -1.9544e-01, -3.8624e-01,\n",
      "          3.1786e-01,  1.7643e-03, -2.5867e-01, -2.0830e-01,  1.0307e-01,\n",
      "          1.6470e-01,  3.1534e-01, -1.0481e-01,  4.4963e-04, -9.7638e-02,\n",
      "         -4.5372e-01,  2.3739e-01,  2.2678e-01, -2.3499e-03, -1.4970e-01,\n",
      "          2.5346e-03,  1.8378e-02, -1.6756e-01, -1.6163e-01, -9.5248e-02,\n",
      "          1.0249e-01, -1.0247e-01,  6.5130e-02, -8.3423e-02, -2.6889e-02,\n",
      "         -1.0063e-01, -8.6664e-02, -3.3082e-01, -2.2176e-01, -4.2514e-02,\n",
      "          3.0036e-01, -2.4872e-02, -3.1927e-01, -1.1688e-01, -3.4541e-01,\n",
      "         -1.5358e-01, -2.3746e-01, -4.4361e-01,  5.0043e-02,  2.5153e-01,\n",
      "          1.6245e-01,  1.4552e-01, -4.3434e-01,  4.5262e-01, -1.1102e-01,\n",
      "         -2.9943e-01, -4.5870e-01,  1.9120e-01, -3.8264e-02, -5.3842e-02,\n",
      "         -3.8023e-01,  4.3782e-02, -2.5112e-01, -4.8800e-01,  7.1485e-02,\n",
      "          3.3808e-01,  3.3097e-02,  1.9771e-01,  4.6148e-01,  4.4605e-01,\n",
      "          1.5816e-01, -1.0610e-01,  2.5530e-01,  2.2130e-01, -1.8510e-02,\n",
      "          1.7927e-01, -4.9470e-02,  3.5844e-02,  1.9333e-01,  7.7184e-02,\n",
      "         -2.5765e-01, -1.9185e-01, -3.9558e-01, -3.3579e-02, -4.7030e-01,\n",
      "          4.3372e-02,  1.9506e-01, -4.2808e-01,  3.2651e-01, -1.2341e-01,\n",
      "         -3.6785e-01, -1.7043e-01, -3.8583e-01, -3.9197e-01]],\n",
      "       grad_fn=<CopyBackwards>), hidden_states=None, attentions=None)\n",
      "Question: How many pretrained models are available in Transformers?\n",
      "Answer: transformers ? [SEP] transformers ( formerly known as pytorch - transformers and pytorch - pretrained - bert ) provides general - purpose architectures ( bert , gpt - 2 , roberta , xlm , distilbert , xlnet … ) for natural language understanding ( nlu ) and natural language generation ( nlg ) with over 32 + pre\n",
      "inputs:{'input_ids': tensor([[  101,  2054,  2515, 19081,  3073,  1029,   102, 19081,  1006,  3839,\n",
      "          2124,  2004,  1052, 22123,  2953,  2818,  1011, 19081,  1998,  1052,\n",
      "         22123,  2953,  2818,  1011,  3653, 23654,  2098,  1011, 14324,  1007,\n",
      "          3640,  2236,  1011,  3800,  4294,  2015,  1006, 14324,  1010, 14246,\n",
      "          2102,  1011,  1016,  1010, 23455,  1010, 28712,  2213,  1010,  4487,\n",
      "         16643, 23373,  1010, 28712,  7159,  1529,  1007,  2005,  3019,  2653,\n",
      "          4824,  1006, 17953,  2226,  1007,  1998,  3019,  2653,  4245,  1006,\n",
      "         17953,  2290,  1007,  2007,  2058,  3590,  1009,  3653, 23654,  2098,\n",
      "          4275,  1999,  2531,  1009,  4155,  1998,  2784,  6970, 25918,  8010,\n",
      "          2090, 23435, 12314,  1016,  1012,  1014,  1998,  1052, 22123,  2953,\n",
      "          2818,  1012,   102]]), 'token_type_ids': tensor([[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "         1, 1, 1, 1, 1, 1, 1]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "         1, 1, 1, 1, 1, 1, 1]])}\n",
      "QuestionAnsweringModelOutput(loss=None, start_logits=tensor([[-0.2408, -0.3242, -0.1797,  0.6169,  0.0417, -0.3710, -0.2016,  0.4332,\n",
      "         -0.5477, -0.2650, -0.5124, -0.4409, -0.0916,  0.0880, -0.3134,  0.3066,\n",
      "         -0.1946,  0.3934, -0.0532, -0.0072,  0.1285, -0.3057,  0.1608, -0.2389,\n",
      "          0.4742,  0.0752, -0.0670, -0.3411, -0.2476, -0.0481, -0.3412, -0.3899,\n",
      "         -0.0604, -0.2416, -0.1298, -0.1454, -0.3557, -0.0193, -0.0854,  0.0186,\n",
      "          0.1549, -0.1147, -0.2525, -0.0664,  0.1062, -0.0861,  0.1574, -0.2460,\n",
      "         -0.1467,  0.2988, -0.1579, -0.0478, -0.0975,  0.2965, -0.2022, -0.1138,\n",
      "          0.0259, -0.4198,  0.4520,  0.1559, -0.0414, -0.9058,  0.1547, -0.6555,\n",
      "         -0.1351, -0.2330,  0.5743,  0.1342,  0.1493, -0.6522,  0.0437, -0.3398,\n",
      "         -0.0393, -0.0524, -0.1350,  0.1385,  0.0277,  0.2441, -0.0025, -0.0201,\n",
      "         -0.1818, -0.1891, -0.2772, -0.0393, -0.0518,  0.1302, -0.1394, -0.1579,\n",
      "         -0.2555, -0.3490, -0.1932,  0.0184, -0.0853, -0.1448,  0.2969,  0.5308,\n",
      "         -0.0580,  0.1076,  0.2542, -0.1845,  0.2440,  0.0826,  0.0735]],\n",
      "       grad_fn=<CopyBackwards>), end_logits=tensor([[ 0.1823,  0.0359,  0.3045, -0.0354, -0.1770,  0.4300,  0.1478, -0.0391,\n",
      "         -0.0986,  0.2562,  0.3280,  0.2375,  0.3945, -0.1567, -0.1989, -0.1331,\n",
      "          0.0743, -0.1901, -0.3644,  0.3466, -0.0298, -0.2586, -0.1648,  0.1185,\n",
      "          0.0202,  0.1418, -0.2502,  0.0157, -0.0326, -0.5031,  0.2803,  0.2243,\n",
      "         -0.0031, -0.1720, -0.0053,  0.0416, -0.1095, -0.1156, -0.0571,  0.1197,\n",
      "         -0.1107,  0.1019, -0.0686, -0.0046, -0.1078, -0.0649, -0.4116, -0.2064,\n",
      "         -0.0240,  0.3087, -0.0278, -0.3537, -0.1139, -0.3824, -0.1282, -0.2433,\n",
      "         -0.4417,  0.0797,  0.2318,  0.0654,  0.1671, -0.4001,  0.4098, -0.1204,\n",
      "         -0.5647, -0.4588,  0.2694, -0.0161,  0.0386, -0.2941,  0.1154, -0.2020,\n",
      "         -0.4910,  0.1104,  0.2738, -0.0052,  0.1583,  0.2943,  0.2594, -0.0053,\n",
      "         -0.2516,  0.1776,  0.1894, -0.0161,  0.1606, -0.0303,  0.0756,  0.2948,\n",
      "          0.1259, -0.1354, -0.1815, -0.3527, -0.0125, -0.4096,  0.0760,  0.2558,\n",
      "         -0.3964,  0.3742, -0.0565, -0.3176, -0.1113, -0.3844, -0.3925]],\n",
      "       grad_fn=<CopyBackwards>), hidden_states=None, attentions=None)\n",
      "Question: What does Transformers provide?\n",
      "Answer: transformers provide ?\n",
      "inputs:{'input_ids': tensor([[  101, 19081,  3640,  6970, 25918,  8010,  2090,  2029,  7705,  2015,\n",
      "          1029,   102, 19081,  1006,  3839,  2124,  2004,  1052, 22123,  2953,\n",
      "          2818,  1011, 19081,  1998,  1052, 22123,  2953,  2818,  1011,  3653,\n",
      "         23654,  2098,  1011, 14324,  1007,  3640,  2236,  1011,  3800,  4294,\n",
      "          2015,  1006, 14324,  1010, 14246,  2102,  1011,  1016,  1010, 23455,\n",
      "          1010, 28712,  2213,  1010,  4487, 16643, 23373,  1010, 28712,  7159,\n",
      "          1529,  1007,  2005,  3019,  2653,  4824,  1006, 17953,  2226,  1007,\n",
      "          1998,  3019,  2653,  4245,  1006, 17953,  2290,  1007,  2007,  2058,\n",
      "          3590,  1009,  3653, 23654,  2098,  4275,  1999,  2531,  1009,  4155,\n",
      "          1998,  2784,  6970, 25918,  8010,  2090, 23435, 12314,  1016,  1012,\n",
      "          1014,  1998,  1052, 22123,  2953,  2818,  1012,   102]]), 'token_type_ids': tensor([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])}\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "QuestionAnsweringModelOutput(loss=None, start_logits=tensor([[-1.2878e-01,  3.0782e-01, -2.4559e-01, -1.4951e-01, -1.1949e-01,\n",
      "         -1.6474e-01, -2.4681e-01,  2.1168e-01,  2.8502e-01, -2.6541e-01,\n",
      "         -3.9923e-01, -7.1881e-02,  5.4817e-01, -4.5537e-01, -2.4568e-01,\n",
      "         -5.1151e-01, -4.4427e-01, -1.2528e-01,  7.1618e-02, -3.4776e-01,\n",
      "          2.7170e-01, -1.9582e-01,  4.6603e-01, -6.5703e-02, -1.9252e-02,\n",
      "          9.8797e-02, -3.3767e-01,  1.2233e-01, -2.0983e-01,  4.6932e-01,\n",
      "          6.0442e-02, -7.5030e-02, -3.2702e-01, -2.5363e-01,  9.7604e-03,\n",
      "         -2.9355e-01, -3.0845e-01, -5.5540e-02, -1.4168e-01, -3.8999e-03,\n",
      "         -8.2347e-02, -3.4380e-01, -4.9974e-03, -1.0133e-01, -6.8717e-02,\n",
      "          1.5717e-01, -1.1907e-01, -2.6423e-01, -7.4110e-02,  7.7396e-02,\n",
      "         -8.3980e-02,  1.2772e-01, -2.6426e-01, -1.6226e-01,  2.6700e-01,\n",
      "         -1.3828e-01, -5.9531e-02, -9.3357e-02,  2.8573e-01, -2.6597e-01,\n",
      "         -1.0377e-01,  3.4796e-02, -3.9672e-01,  5.3762e-01,  2.4587e-01,\n",
      "          2.3842e-03, -8.0429e-01,  1.8766e-01, -5.5790e-01, -3.7884e-01,\n",
      "         -2.2374e-01,  5.2760e-01,  1.3873e-01,  1.4633e-01, -6.4821e-01,\n",
      "          2.3705e-02, -3.2360e-01, -1.0756e-01, -1.0982e-01, -1.4262e-01,\n",
      "          1.0415e-01, -4.5801e-03,  3.0241e-01,  4.2355e-02, -2.1504e-02,\n",
      "         -1.5838e-01, -1.8363e-01, -2.4006e-01, -3.4591e-02, -5.6037e-02,\n",
      "          1.4522e-01,  8.3217e-03, -1.0695e-01, -7.6365e-02, -3.4362e-01,\n",
      "         -3.0419e-01, -7.9659e-04, -7.8314e-02, -1.4461e-01,  2.8109e-01,\n",
      "          4.8789e-01, -1.7987e-02,  8.8863e-02,  2.6843e-01, -2.4237e-01,\n",
      "          2.5315e-01,  8.9432e-02,  8.5800e-02]], grad_fn=<CopyBackwards>), end_logits=tensor([[ 0.1890, -0.0484,  0.4225,  0.4361,  0.2017,  0.0017,  0.0335, -0.1467,\n",
      "         -0.1236, -0.1769,  0.4909,  0.0763, -0.0352, -0.1381,  0.2992,  0.2621,\n",
      "          0.1788,  0.3808, -0.0629, -0.1718, -0.1301,  0.1320, -0.1611, -0.3742,\n",
      "          0.4111,  0.0807, -0.1648, -0.1758,  0.1150,  0.0042,  0.1632, -0.2380,\n",
      "          0.0617, -0.1022, -0.4843,  0.1492,  0.2701,  0.0187, -0.1056,  0.0493,\n",
      "          0.0772, -0.0766, -0.1641, -0.0656,  0.1295, -0.0679,  0.1194, -0.0679,\n",
      "          0.0049, -0.1037, -0.0608, -0.3600, -0.2229, -0.0123,  0.3438,  0.0106,\n",
      "         -0.3323, -0.0948, -0.3007, -0.1293, -0.2522, -0.4584,  0.1336,  0.2565,\n",
      "          0.0513,  0.1396, -0.3676,  0.4401, -0.1549, -0.4992, -0.4714,  0.2192,\n",
      "         -0.1224, -0.0632, -0.2633,  0.0717, -0.2700, -0.5584,  0.1545,  0.2792,\n",
      "          0.0203,  0.2158,  0.3512,  0.2636, -0.0007, -0.2068,  0.2219,  0.2237,\n",
      "          0.0648,  0.1466, -0.0611,  0.0753,  0.4327,  0.2063, -0.1049, -0.0986,\n",
      "         -0.2600, -0.0422, -0.3900,  0.1427,  0.2983, -0.4283,  0.3893, -0.0194,\n",
      "         -0.3340, -0.1135, -0.4020, -0.4071]], grad_fn=<CopyBackwards>), hidden_states=None, attentions=None)\n",
      "Question: Transformers provides interoperability between which frameworks?\n",
      "Answer: \n"
     ]
    }
   ],
   "source": [
    "from transformers import AutoTokenizer, AutoModelForQuestionAnswering\n",
    "import torch\n",
    "bert_model = 'bert-base-uncased'\n",
    "# tokenizer = AutoTokenizer.from_pretrained(\"bert-large-uncased-whole-word-masking-finetuned-squad\")\n",
    "# model = AutoModelForQuestionAnswering.from_pretrained(\"bert-large-uncased-whole-word-masking-finetuned-squad\")\n",
    "model = AutoModelForQuestionAnswering.from_pretrained(bert_model)\n",
    "tokenizer = BertTokenizer.from_pretrained(bert_model)\n",
    "\n",
    "text = r\"\"\"\n",
    "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides general-purpose\n",
    "architectures (BERT, GPT-2, RoBERTa, XLM, DistilBert, XLNet…) for Natural Language Understanding (NLU) and Natural\n",
    "Language Generation (NLG) with over 32+ pretrained models in 100+ languages and deep interoperability between\n",
    "TensorFlow 2.0 and PyTorch.\n",
    "\"\"\"\n",
    "questions = [\n",
    "    \"How many pretrained models are available in Transformers?\",\n",
    "    \"What does Transformers provide?\",\n",
    "    \"Transformers provides interoperability between which frameworks?\",\n",
    "]\n",
    "for question in questions:\n",
    "    inputs = tokenizer(question, text, add_special_tokens=True, return_tensors=\"pt\")\n",
    "    print(f'inputs:{inputs}')\n",
    "    input_ids = inputs[\"input_ids\"].tolist()[0]\n",
    "    outputs = model(**inputs)\n",
    "    print(outputs)\n",
    "    answer_start_scores = outputs.start_logits\n",
    "    answer_end_scores = outputs.end_logits\n",
    "    answer_start = torch.argmax(\n",
    "        answer_start_scores\n",
    "    )  # Get the most likely beginning of answer with the argmax of the score\n",
    "    answer_end = torch.argmax(answer_end_scores) + 1  # Get the most likely end of answer with the argmax of the score\n",
    "    answer = tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(input_ids[answer_start:answer_end]))\n",
    "    print(f\"Question: {question}\")\n",
    "    print(f\"Answer: {answer}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}