{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "00539cf9",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import re\n",
    "import json\n",
    "\n",
    "import numpy as np\n",
    "import torch\n",
    "import torch.nn.functional as F\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "\n",
    "from transformers import BertTokenizer, BertForMaskedLM, BertModel"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "26d73513",
   "metadata": {},
   "outputs": [],
   "source": [
    "cpu = torch.device('cpu')\n",
    "cuda = torch.device('cuda')     # Default CUDA device\n",
    "cuda0 = torch.device('cuda:0')\n",
    "cuda2 = torch.device('cuda:2')  # GPU 2 (these are 0-indexed)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "7e67223f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cpu cuda:0\n"
     ]
    }
   ],
   "source": [
    "a = torch.tensor([1,2,3], device=cpu)\n",
    "b = torch.tensor([1,2,3], device=cuda)\n",
    "print(a.device,b.device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "2d8a151c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([1, 2, 3], device='cuda:0')\n"
     ]
    }
   ],
   "source": [
    "cuda = torch.device('cuda')  \n",
    "b = torch.tensor([1,2,3], device=cuda)\n",
    "c = b.clone()\n",
    "print(c)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "6241f047",
   "metadata": {},
   "outputs": [],
   "source": [
    "WORK_DIR = \"/data/disk5/private/yuc/coref/bert-tagger\"\n",
    "FILE_LIST = os.path.join(WORK_DIR, \"playground/filelist.txt\")\n",
    "WIKI_DIR = os.path.join(WORK_DIR, \"../wikipedia/text\")\n",
    "DUMP_DIR = os.path.join(WORK_DIR, \"playground/dump_kl_para\")\n",
    "LOG_DIR = os.path.join(WORK_DIR, \"playground/logs\")\n",
    "PARSED_DATA_DIR = os.path.join(WORK_DIR, \"../wikipedia/parsed-text\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "ba0569f3",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Global initialization started.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of the model checkpoint at bert-base-cased were not used when initializing BertForMaskedLM: ['cls.seq_relationship.weight', 'cls.seq_relationship.bias']\n",
      "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Global initialization completed.\n"
     ]
    }
   ],
   "source": [
    "print(\"Global initialization started.\")\n",
    "# global_device = torch.device('cuda')\n",
    "global_device = torch.device('cpu')\n",
    "global_tokenizer = BertTokenizer.from_pretrained('bert-base-cased')\n",
    "global_model = BertForMaskedLM.from_pretrained('bert-base-cased').to(global_device)\n",
    "print(\"Global initialization completed.\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "b4db03a7",
   "metadata": {},
   "outputs": [],
   "source": [
    "# result: masked;   [B N V]\n",
    "# target: unmasked  [B N V]\n",
    "# index: golden     [B N V]\n",
    "# mask: valid token [B N]\n",
    "\n",
    "def weighted_average(tensor, weight, dim=1):\n",
    "    \"\"\"Weighted mean of `tensor` along `dim`; `weight` doubles as a 0/1 mask.\"\"\"\n",
    "    weighted_sum = (tensor * weight).sum(dim=dim)\n",
    "    return weighted_sum / weight.sum(dim=dim)\n",
    "\n",
    "def index_only_dist(result, target, index, mask):\n",
    "    # Distance that only looks at the gold-token vocabulary entries.\n",
    "    # result/target: logits [B, N, V]; index: one-hot gold tensor [B, N, V]\n",
    "    # (see the header comment above); mask: valid-token mask [B, N].\n",
    "    # Returns a [B] tensor.\n",
    "    n_dim = 1\n",
    "    v_dim = 2\n",
    "    # relu keeps only positions where `target` assigns more log-mass than\n",
    "    # `result`; multiplying by `index` keeps just the gold entry per token.\n",
    "    # [B N]\n",
    "    return weighted_average(\n",
    "        torch.sum(\n",
    "            F.relu(\n",
    "                F.log_softmax(target, dim=v_dim) - F.log_softmax(result, dim=v_dim)\n",
    "            ) * index, dim=v_dim\n",
    "        ),\n",
    "        mask,\n",
    "        dim=n_dim\n",
    "    )\n",
    "\n",
    "def kl_divergence_dist(result, target, index, mask):\n",
    "    # KL(softmax(target) || softmax(result)) per token, then mask-weighted\n",
    "    # mean over the sentence dimension.  `index` is unused here; it is kept\n",
    "    # so all *_dist functions share the same signature.\n",
    "    n_dim = 1\n",
    "    v_dim = 2\n",
    "    return weighted_average(\n",
    "        torch.sum(\n",
    "            F.softmax(target, dim=v_dim) * ( - F.log_softmax(result, dim=v_dim) + \n",
    "            F.log_softmax(target, dim=v_dim)),\n",
    "            dim=v_dim\n",
    "        ),\n",
    "        mask,\n",
    "        dim=n_dim\n",
    "    )\n",
    "\n",
    "def js_divergence_dist(result, target, index, mask):\n",
    "    # NOTE(review): this is the symmetrized KL, 0.5*(KL(p||q) + KL(q||p)),\n",
    "    # not the textbook Jensen-Shannon divergence (which compares each\n",
    "    # distribution to the mixture M=(p+q)/2) -- confirm the name is intended.\n",
    "    return 0.5 * (kl_divergence_dist(result, target, index, mask) +\n",
    "             kl_divergence_dist(target, result, index, mask))\n",
    "\n",
    "def cross_entropy_dist(result, target, index, mask):\n",
    "    # Soft cross-entropy H(softmax(target), softmax(result)) per token,\n",
    "    # mask-averaged over the sentence.  `index` is unused (uniform signature).\n",
    "    n_dim = 1\n",
    "    v_dim = 2\n",
    "    return weighted_average(\n",
    "        torch.sum(\n",
    "            - F.softmax(target, dim=v_dim) * F.log_softmax(result, dim=v_dim),\n",
    "            dim=v_dim\n",
    "        ),\n",
    "        mask,\n",
    "        dim=n_dim\n",
    "    )\n",
    "\n",
    "def inv_softmax(tensor, dim=1):\n",
    "    \"\"\"Map a probability tensor back to logits (inverse of softmax up to\n",
    "    an additive per-row constant).\n",
    "\n",
    "    Dividing by the per-row minimum before the log only shifts the logits\n",
    "    by a constant, which softmax ignores; the smallest entry maps to 0.\n",
    "    \"\"\"\n",
    "    res = torch.clone(tensor)\n",
    "    # keepdim=True replaces the previous hardcoded unsqueeze(1), so the\n",
    "    # function now respects the `dim` argument instead of assuming dim=1\n",
    "    base = torch.amin(res, dim=dim, keepdim=True)\n",
    "    res = torch.log(res / base)\n",
    "    return res"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "5f6362ee",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([-0.6931, -0.6931])\n",
      "tensor([1.0397, 0.0000])\n",
      "tensor([0.7006, 0.6678])\n",
      "tensor([0.6842, 0.6842])\n",
      "tensor([1.4788, 1.4459])\n"
     ]
    }
   ],
   "source": [
    "# batch size = 2, n dim = 2, v dim = 3\n",
    "mask = torch.ones([2,2])\n",
    "p = torch.tensor([[0.10, 0.40, 0.50], [0.80, 0.15, 0.05]])\n",
    "p = inv_softmax(p)\n",
    "# notice that p1 and p2 are logits\n",
    "p1 = torch.tensor([[[1,0],[1,0]], [[0,1], [0,1]]], dtype=torch.float32)\n",
    "p1 = p1 @ p\n",
    "p2 = torch.tensor([[[0,1],[1,0]], [[1,0], [0,1]]], dtype=torch.float32)\n",
    "p2 = p2 @ p\n",
    "p3 = torch.tensor([[[1, 0, 0], [0, 0, 1]], [[1, 0, 0], [0, 1, 0]]])\n",
    "print(F.log_softmax(torch.tensor([1.,1.]), dim=0)) # torch log softmax is based on e, and ln(2) == 0.693\n",
    "print(index_only_dist(p1, p2, p3, mask))\n",
    "print(kl_divergence_dist(p1, p2, p3, mask)) # should be about 1.401/2, 1.336/2 or 0.700, 0.668\n",
    "print(js_divergence_dist(p1, p2, p3, mask)) # should be about 0.684\n",
    "print(cross_entropy_dist(p1, p2, p3, mask)) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "71a28225",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cochran – armitage test for trend\n",
      "{'tokens': tensor([  101,  1884,  1732,  4047,   782,  1981,  5168,  2176,  2774,  1111,\n",
      "        10209,   102]), 'raw': ['[CLS]', 'co', '##ch', '##ran', '–', 'arm', '##ita', '##ge', 'test', 'for', 'trend', '[SEP]']}\n"
     ]
    }
   ],
   "source": [
    "def default_transform(sentence, device=global_device):\n",
    "    \"\"\"Tokenize a raw sentence into BERT ids; return None to discard it.\n",
    "\n",
    "    Discards markup lines (starting with '<') and sentences whose token\n",
    "    count falls outside (8, 256).\n",
    "    \"\"\"\n",
    "    # startswith also handles the empty string (sentence[0] would raise)\n",
    "    if sentence.startswith(\"<\"):\n",
    "        return None\n",
    "    # token ids, squeezed from the [1, L] batch the tokenizer returns\n",
    "    tokens = global_tokenizer(sentence, return_tensors=\"pt\")[\"input_ids\"].to(device)\n",
    "    tokens = torch.squeeze(tokens)\n",
    "    # raw_tokens: parsed into subwords but not converted to ids\n",
    "    raw_tokens = global_tokenizer.convert_ids_to_tokens(tokens)\n",
    "    length = tokens.shape[0]\n",
    "    if length <= 8 or 256 <= length: # ignore sentences too short or too long\n",
    "        return None\n",
    "\n",
    "    # this is the format of a sentence.\n",
    "    return {\n",
    "        \"tokens\": tokens,\n",
    "        \"raw\": raw_tokens\n",
    "    }\n",
    "\n",
    "sentence = global_tokenizer.convert_tokens_to_string(['cochran', '–', 'arm', '##ita', '##ge', 'test', 'for', 'trend'])\n",
    "print(sentence)\n",
    "ts = default_transform(sentence)\n",
    "print(ts)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "4d6147f9",
   "metadata": {},
   "outputs": [],
   "source": [
    "class SentenceIterable:\n",
    "    \"\"\"Iterate (transformed_sentence, file_id, stc_id) over a list of files.\n",
    "\n",
    "    `file_id`/`stc_id` allow resuming from a checkpoint; sentences the\n",
    "    transform rejects (returns None) are skipped but still counted.\n",
    "    \"\"\"\n",
    "    def __init__(self,\n",
    "        file_path_list=FILE_LIST,\n",
    "        file_id=0,\n",
    "        stc_id=0,\n",
    "        transform=default_transform):\n",
    "        self.file_id = file_id\n",
    "        self.stc_id = stc_id\n",
    "        with open(file_path_list, \"r\") as f_list:\n",
    "            self.file_paths = f_list.read().split()\n",
    "        # `is None` (not `== None`) is the idiomatic identity check\n",
    "        if transform is None:\n",
    "            self.transform = default_transform\n",
    "        else:\n",
    "            self.transform = transform\n",
    "        print(\"SentenceIterable constructed.\")\n",
    "\n",
    "    def __iter__(self):\n",
    "        return self.sentence_generator()\n",
    "\n",
    "    def sentence_generator(self):\n",
    "        # state (file_id/stc_id) lives on self, so a fresh iterator resumes\n",
    "        # where the previous one stopped\n",
    "        file_count = len(self.file_paths)\n",
    "        while self.file_id < file_count:\n",
    "            file_path = self.file_paths[self.file_id]\n",
    "            with open(file_path) as fs:\n",
    "                sentences = fs.readlines()\n",
    "                sentence_count = len(sentences)\n",
    "                while self.stc_id < sentence_count:\n",
    "                    sentence = sentences[self.stc_id]\n",
    "                    sentence = self.transform(sentence)\n",
    "                    if sentence is None:\n",
    "                        print(\"sentence discarded.\")\n",
    "                    else:\n",
    "                        yield (sentence, self.file_id, self.stc_id)\n",
    "                    self.stc_id += 1\n",
    "            self.stc_id = 0\n",
    "            self.file_id += 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "a8a37e89",
   "metadata": {},
   "outputs": [],
   "source": [
    "def default_transform_with_index_mapping(sentence, device):\n",
    "    \"\"\"Tokenize a pre-split sentence dict and align its NP spans to subwords.\n",
    "\n",
    "    `sentence` is { \"sentence\", \"file_id\", \"sent_id\", \"np_list\" }; word-level\n",
    "    NP spans are remapped to subword index lists padded to equal length.\n",
    "    Returns None when the token count falls outside (8, 128).\n",
    "    \"\"\"\n",
    "    # use the tokenizer's special-token ids instead of hardcoding 101/102\n",
    "    tokens = [ global_tokenizer.cls_token_id, ] # [CLS]\n",
    "    index_mapping = []\n",
    "    start, end = 1, 1\n",
    "    for raw_token in sentence[\"sentence\"]:\n",
    "        encoded = global_tokenizer.encode(raw_token, add_special_tokens=False)\n",
    "        tokens.extend(encoded)\n",
    "        end += len(encoded)\n",
    "        # word i covers subword positions [start, end)\n",
    "        index_mapping.append((start, end))\n",
    "        start += len(encoded)\n",
    "    tokens.append(global_tokenizer.sep_token_id)    # [SEP]\n",
    "    length = len(tokens)\n",
    "    if length <= 8 or 128 <= length: # ignore sentences too short or too long\n",
    "        return None\n",
    "\n",
    "    tokens = torch.tensor(tokens, device=device)\n",
    "    # raw_tokens: parsed into subwords but not converted to ids\n",
    "    raw_tokens = global_tokenizer.convert_ids_to_tokens(tokens)\n",
    "\n",
    "    index_as_start = lambda x: index_mapping[x][0]\n",
    "    index_as_end = lambda x: index_mapping[x][1]\n",
    "\n",
    "    # word-level [x, y) spans -> subword-level [start, end) spans\n",
    "    np_list = [ [index_as_start(x), index_as_end(y-1)] \n",
    "                        for x, y in sentence[\"np_list\"] ]\n",
    "\n",
    "    def interval_to_indices(interval, padding):\n",
    "        # expand [start, end) into index list + 0/1 validity mask,\n",
    "        # right-padded with zeros up to `padding`\n",
    "        indices = list(range(*interval))\n",
    "        mask = [1,] * len(indices)\n",
    "        if len(indices) > padding:\n",
    "            print(\"Warning: padding not used.\")\n",
    "            return (indices, mask)\n",
    "        while len(indices) < padding:\n",
    "            indices.append(0)\n",
    "            mask.append(0)\n",
    "        return (indices, mask)\n",
    "\n",
    "    # default=0 keeps an empty np_list from raising ValueError\n",
    "    np_max_length = max((y - x for x, y in np_list), default=0)\n",
    "\n",
    "    np_list = [ interval_to_indices(x, np_max_length) for x in np_list]\n",
    "\n",
    "    sentence[\"np_list\"] = np_list\n",
    "\n",
    "    # this is the format of a sentence.\n",
    "    transformed = {\n",
    "        \"tokens\": tokens,\n",
    "        \"raw\": raw_tokens\n",
    "    }\n",
    "    transformed.update(sentence)\n",
    "    return transformed"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "af285b47",
   "metadata": {},
   "outputs": [],
   "source": [
    "class ParsedSentenceIterable:\n",
    "    \"\"\"Iterate parsed (JSON-lines) sentences, resumable via file_id/stc_id.\"\"\"\n",
    "    def __init__(self,\n",
    "        file_id=0,\n",
    "        stc_id=0,\n",
    "        transform=default_transform_with_index_mapping,\n",
    "        device=global_device):\n",
    "        self.file_id = file_id\n",
    "        self.stc_id = stc_id\n",
    "        self.transform = transform\n",
    "        self.device = device\n",
    "        self.path_to_file_list = os.path.join(PARSED_DATA_DIR, \"filelist.txt\")\n",
    "        if not os.path.exists(self.path_to_file_list):\n",
    "            # build the file list once, sorted numerically by the <N>.dump name\n",
    "            file_list = []\n",
    "            for root, dirs, files in os.walk(PARSED_DATA_DIR):\n",
    "                for filename in files:\n",
    "                    if filename.endswith(\"dump\"):\n",
    "                        file_list.append(filename)\n",
    "            pattern = re.compile(\"([1-9][0-9]*).dump\")\n",
    "            def sort_key(x):\n",
    "                # int() instead of eval(): same result, no code-execution risk\n",
    "                return int(pattern.search(x).group(1))\n",
    "            file_list.sort(key=sort_key)\n",
    "            with open(self.path_to_file_list, \"w\") as fl:\n",
    "                for filename in file_list:\n",
    "                    fl.write(filename + \"\\n\")\n",
    "\n",
    "    def __iter__(self):\n",
    "        return self.sentence_generator()\n",
    "\n",
    "    def sentence_generator(self):\n",
    "        with open(self.path_to_file_list, \"r\") as f_list:\n",
    "            for file_id, file_path in enumerate(f_list):\n",
    "                if file_id < self.file_id:\n",
    "                    continue\n",
    "                # honor the stc_id checkpoint (previously stored but unused):\n",
    "                # resume mid-file only in the first file we process\n",
    "                start_stc = self.stc_id if file_id == self.file_id else 0\n",
    "                file_path = os.path.join(PARSED_DATA_DIR, file_path)\n",
    "                with open(file_path.strip()) as fs:\n",
    "                    for stc_id, line in enumerate(fs):\n",
    "                        if stc_id < start_stc:\n",
    "                            continue\n",
    "                        # { \"sentence\", \"file_id\", \"sent_id\", \"np_list\" }\n",
    "                        sentence = json.loads(line.strip())\n",
    "                        sentence = self.transform(sentence, self.device)\n",
    "                        if sentence is not None:\n",
    "                            yield (sentence, file_id, stc_id)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "id": "32cd4153",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "({'tokens': tensor([  101,  1109, 10209,  2774,  1110,  3666,  1165,  1103,  2233,  1321,\n",
      "         1103,  1532,  1104,   170,   123,   240,   107,   180,   107, 14255,\n",
      "         1916,  9517,  1952,   119,  1370,  1859,   117,  1191,   107,   180,\n",
      "          107,   134,   124,  1195,  1138,   102]), 'raw': ['[CLS]', 'The', 'trend', 'test', 'is', 'applied', 'when', 'the', 'data', 'take', 'the', 'form', 'of', 'a', '2', '×', '\"', 'k', '\"', 'con', '##ting', '##ency', 'table', '.', 'For', 'example', ',', 'if', '\"', 'k', '\"', '=', '3', 'we', 'have', '[SEP]'], 'sentence': ['The', 'trend', 'test', 'is', 'applied', 'when', 'the', 'data', 'take', 'the', 'form', 'of', 'a', '2', '\\xa0', '×', '\\xa0', '\"', 'k', '\"', 'contingency', 'table', '.', 'For', 'example', ',', 'if', '\"', 'k', '\"', '\\xa0', '=', '\\xa0', '3', 'we', 'have'], 'file_id': 0, 'sent_id': 5, 'np_list': [([1, 2, 3, 0, 0, 0], [1, 1, 1, 0, 0, 0]), ([7, 8, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0]), ([10, 11, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0]), ([17, 18, 19, 20, 21, 22], [1, 1, 1, 1, 1, 1]), ([25, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0]), ([33, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0])]}, 0, 1)\n"
     ]
    }
   ],
   "source": [
    "dataset = ParsedSentenceIterable()\n",
    "dataloader = iter(dataset)\n",
    "parsed_sentence = dataloader.__next__()\n",
    "print(parsed_sentence)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "0edd5fc4",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'label': tensor([1, 2, 3, 4, 5]), 'unmasked': tensor([  1, 103,   3,   4,   5]), 'masked': tensor([  1, 103, 103,   4,   5]), 'miss_id': tensor([1]), 'mask_id': tensor([2]), 'miss_mask': tensor([1])}\n",
      "{'label': tensor([1, 2, 3, 4, 5]), 'unmasked': tensor([  1, 103,   3,   4,   5]), 'masked': tensor([  1, 103,   3, 103,   5]), 'miss_id': tensor([1]), 'mask_id': tensor([3]), 'miss_mask': tensor([1])}\n",
      "{'label': tensor([1, 2, 3, 4, 5]), 'unmasked': tensor([  1,   2, 103,   4,   5]), 'masked': tensor([  1, 103, 103,   4,   5]), 'miss_id': tensor([2]), 'mask_id': tensor([1]), 'miss_mask': tensor([1])}\n",
      "{'label': tensor([1, 2, 3, 4, 5]), 'unmasked': tensor([  1,   2, 103,   4,   5]), 'masked': tensor([  1,   2, 103, 103,   5]), 'miss_id': tensor([2]), 'mask_id': tensor([3]), 'miss_mask': tensor([1])}\n",
      "{'label': tensor([1, 2, 3, 4, 5]), 'unmasked': tensor([  1,   2,   3, 103,   5]), 'masked': tensor([  1, 103,   3, 103,   5]), 'miss_id': tensor([3]), 'mask_id': tensor([1]), 'miss_mask': tensor([1])}\n",
      "{'label': tensor([1, 2, 3, 4, 5]), 'unmasked': tensor([  1,   2,   3, 103,   5]), 'masked': tensor([  1,   2, 103, 103,   5]), 'miss_id': tensor([3]), 'mask_id': tensor([2]), 'miss_mask': tensor([1])}\n"
     ]
    }
   ],
   "source": [
    "class QuestionPairIterable(Dataset):\n",
    "    \"\"\"All ordered (missing, masked) single-token index pairs of a sentence.\n",
    "\n",
    "    Each item holds the original tokens, a copy with the missing position\n",
    "    replaced by `miss_placeholder`, and a further copy with the masked\n",
    "    position replaced by `mask_placeholder`.\n",
    "    \"\"\"\n",
    "    def __init__(self, \n",
    "        sentence,\n",
    "        mask_placeholder=\"[MASK]\",\n",
    "        miss_placeholder=\"[MASK]\",\n",
    "        device=global_device):\n",
    "        # was super(QuestionPairIterable).__init__(): that builds an unbound\n",
    "        # super object and never runs Dataset.__init__\n",
    "        super().__init__()\n",
    "        self.sentence = sentence[\"tokens\"]\n",
    "        self.miss_ph = miss_placeholder\n",
    "        self.mask_ph = mask_placeholder\n",
    "        self.miss_id = global_tokenizer.convert_tokens_to_ids(miss_placeholder)\n",
    "        self.mask_id = global_tokenizer.convert_tokens_to_ids(mask_placeholder)\n",
    "        self.device = device\n",
    "        self.index_pairs = self.generate_index_pairs()\n",
    "        self.start = 0\n",
    "        self.end = len(self.index_pairs)\n",
    "\n",
    "    def generate_index_pairs(self):\n",
    "        # every ordered pair of distinct positions 1..length-2\n",
    "        # (skipping [CLS]/[SEP] at the ends)\n",
    "        length = len(self.sentence)\n",
    "        return [\n",
    "            ([miss_index], [1], [mask_index], [1])\n",
    "            for miss_index in range(1, length-1)\n",
    "                for mask_index in range(1, length-1)\n",
    "                    if miss_index != mask_index\n",
    "        ]\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.index_pairs)\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        missing_indices, miss_idx_mask, masked_indices, mask_idx_mask = self.index_pairs[index]\n",
    "        # clone so the stored sentence tensor is never mutated\n",
    "        unmasked_question = self.sentence.clone()\n",
    "        for missing_index in missing_indices:\n",
    "            unmasked_question[missing_index] = self.miss_id\n",
    "        masked_question = unmasked_question.clone()\n",
    "        for masked_index in masked_indices:\n",
    "            masked_question[masked_index] = self.mask_id\n",
    "\n",
    "        # this is the format of a question pair.\n",
    "        return {\n",
    "            \"label\": self.sentence,\n",
    "            \"unmasked\": unmasked_question, \n",
    "            \"masked\": masked_question, \n",
    "            \"miss_id\": torch.tensor(missing_indices, device=self.device),\n",
    "            \"mask_id\": torch.tensor(masked_indices, device=self.device),\n",
    "            \"miss_mask\": torch.tensor(miss_idx_mask, device=self.device),\n",
    "            # \"mask_mask\": torch.tensor(mask_idx_mask, device=self.device),\n",
    "        }\n",
    "    \n",
    "def test_pair_iterable():\n",
    "    sentence = {\n",
    "        \"tokens\": torch.from_numpy(np.array([1,2,3,4,5]))\n",
    "    }\n",
    "    dataset = QuestionPairIterable(sentence)\n",
    "    for sample in dataset:\n",
    "        print(sample)\n",
    "\n",
    "test_pair_iterable()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "id": "03100380",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[([1, 2, 3, 0, 0, 0], [1, 1, 1, 0, 0, 0]), ([7, 8, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0]), ([10, 11, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0]), ([17, 18, 19, 20, 21, 22], [1, 1, 1, 1, 1, 1]), ([25, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0]), ([33, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0])]\n",
      "{'label': tensor([  101,  1109, 10209,  2774,  1110,  3666,  1165,  1103,  2233,  1321,\n",
      "         1103,  1532,  1104,   170,   123,   240,   107,   180,   107, 14255,\n",
      "         1916,  9517,  1952,   119,  1370,  1859,   117,  1191,   107,   180,\n",
      "          107,   134,   124,  1195,  1138,   102]), 'unmasked': tensor([  103,   103,   103,   103,  1110,  3666,  1165,  1103,  2233,  1321,\n",
      "         1103,  1532,  1104,   170,   123,   240,   107,   180,   107, 14255,\n",
      "         1916,  9517,  1952,   119,  1370,  1859,   117,  1191,   107,   180,\n",
      "          107,   134,   124,  1195,  1138,   102]), 'masked': tensor([  103,   103,   103,   103,  1110,  3666,  1165,  1103,  2233,  1321,\n",
      "          103,   103,  1104,   170,   123,   240,   107,   180,   107, 14255,\n",
      "         1916,  9517,  1952,   119,  1370,  1859,   117,  1191,   107,   180,\n",
      "          107,   134,   124,  1195,  1138,   102]), 'miss_id': tensor([1, 2, 3, 0, 0, 0]), 'mask_id': tensor([10, 11,  0,  0,  0,  0]), 'miss_mask': tensor([1, 1, 1, 0, 0, 0])}\n"
     ]
    }
   ],
   "source": [
    "class NounQuestionPairIterable(QuestionPairIterable):\n",
    "    \"\"\"Question pairs built from noun-phrase spans instead of single tokens.\"\"\"\n",
    "    def __init__(self, sentence, **args):\n",
    "        # np_list must be stashed before the base __init__ invokes\n",
    "        # generate_index_pairs()\n",
    "        self.np_list = sentence[\"np_list\"]\n",
    "        super().__init__(sentence, **args)\n",
    "\n",
    "    def generate_index_pairs(self):\n",
    "        # every ordered pair of distinct NP spans (indices + validity mask);\n",
    "        # leftover debug print of np_list removed\n",
    "        return [\n",
    "            (miss_idx, miss_mask, mask_idx, mask_mask)\n",
    "            for miss_idx, miss_mask in self.np_list\n",
    "                for mask_idx, mask_mask in self.np_list\n",
    "                    if miss_idx != mask_idx\n",
    "        ]\n",
    "\n",
    "def test_noun_pair_iterable(sentence):\n",
    "    questions = NounQuestionPairIterable(sentence)\n",
    "    print(questions[1])\n",
    "\n",
    "test_noun_pair_iterable(parsed_sentence[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "id": "34833ece",
   "metadata": {},
   "outputs": [],
   "source": [
    "class QuestionPairConsumer:\n",
    "    \"\"\"Score question-pair batches with a masked LM and a distance measure.\"\"\"\n",
    "    def __init__(self,\n",
    "        tokenizer=global_tokenizer,\n",
    "        model=global_model,\n",
    "        measure=kl_divergence_dist):\n",
    "        self.tokenizer = tokenizer\n",
    "        self.model = model\n",
    "        self.measure = measure\n",
    "\n",
    "    def consume_question_pair(self, question_pair):\n",
    "        print(\"==========START consuming question==========\")\n",
    "        # [B(atch), L(ength of sentence)]\n",
    "        context = question_pair[\"label\"]\n",
    "        unmasked = question_pair[\"unmasked\"]\n",
    "        masked = question_pair[\"masked\"]\n",
    "        # [B(atch), n(umber of missing tokens)]\n",
    "        missing_indices = question_pair[\"miss_id\"]\n",
    "        masked_indices = question_pair[\"mask_id\"]\n",
    "        miss_idx_mask = question_pair[\"miss_mask\"]\n",
    "\n",
    "        # inference only: no_grad avoids building an autograd graph that\n",
    "        # nothing ever backpropagates through\n",
    "        with torch.no_grad():\n",
    "            # [B(atch), L(ength of sentence), V(ocabulary size)]\n",
    "            u_logits = self.model(input_ids=unmasked).logits\n",
    "            m_logits = self.model(input_ids=masked).logits\n",
    "\n",
    "        missing_label_ids = torch.gather(context, 1, missing_indices) # [B, n]\n",
    "        answer_shape = list(missing_indices.shape)\n",
    "        answer_shape.append(u_logits.shape[2])\n",
    "        missing_indices = missing_indices.unsqueeze(2).expand(answer_shape) # [B, n, V]\n",
    "        missing_label_ids = missing_label_ids.unsqueeze(2).expand(answer_shape) # [B, n, V]\n",
    "        # allocate helper tensors on the logits' device so this also works\n",
    "        # when the model runs on GPU (the old CPU literals would not)\n",
    "        ones_template = torch.ones(answer_shape, device=u_logits.device) # [B, n, V]\n",
    "        # golden logits, g_logits[b][n][index[b][n]] = 1, [B, n, V]\n",
    "        g_logits = torch.scatter(\n",
    "            torch.zeros(answer_shape, device=u_logits.device),\n",
    "            2, missing_label_ids, ones_template)\n",
    "        # unmasked logits [B, n, V]\n",
    "        u_logits = torch.gather(u_logits, 1, missing_indices)\n",
    "        # masked logits [B, n, V]\n",
    "        m_logits = torch.gather(m_logits, 1, missing_indices)\n",
    "        print(\"==========END consuming question==========\")\n",
    "        return self.measure(m_logits, u_logits, g_logits, miss_idx_mask)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "id": "1de50d04",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[([1, 2, 3, 0, 0, 0], [1, 1, 1, 0, 0, 0]), ([7, 8, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0]), ([10, 11, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0]), ([17, 18, 19, 20, 21, 22], [1, 1, 1, 1, 1, 1]), ([25, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0]), ([33, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0])]\n",
      "==========START consuming question==========\n",
      "==========END consuming question==========\n",
      "tensor([0.0456, 0.0231, 0.1024, 0.0164], grad_fn=<MulBackward0>)\n"
     ]
    }
   ],
   "source": [
    "def test_consumer(sent):\n",
    "    producer = NounQuestionPairIterable(sent)\n",
    "    consumer = QuestionPairConsumer(measure=js_divergence_dist) \n",
    "    dataloader = DataLoader(producer, batch_size=4, num_workers=0)\n",
    "    for batch in dataloader:\n",
    "        result = consumer.consume_question_pair(batch)\n",
    "        return result\n",
    "\n",
    "global_result = test_consumer(parsed_sentence[0])\n",
    "print(global_result)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 112,
   "id": "5b940e2c",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_batch_size(batch):\n",
    "    \"\"\"Return the leading (batch) dimension of any tensor in `batch`,\n",
    "    or None when the dict is empty (all entries share the same batch size).\n",
    "    \"\"\"\n",
    "    first = next(iter(batch.values()), None)\n",
    "    return None if first is None else first.shape[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 113,
   "id": "bbd77c8c",
   "metadata": {},
   "outputs": [],
   "source": [
    "table = \"This table can be completed with the marginal totals of the two variables\"\n",
    "short = \"cochran – armitage test for trend\"\n",
    "hamlet = \"To be or not to be, that is the question.\"\n",
    "hysteria = \"a psychoneurosis marked by emotional excitability and disturbances of \" \\\n",
    "    \"the psychogenic, sensory, vasomotor, and visceral functions\"\n",
    "village = \"The statistical area Berkhout which also can include the peripheral \" \\\n",
    "    \"parts of the village as well as the surrounding countryside has a population of around 1780.\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 114,
   "id": "685e9434",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'tokens': tensor([  101,  2023,  2795,  2064,  2022,  2949,  2007,  1996, 14785, 21948,\n",
      "         1997,  1996,  2048, 10857,   102]), 'raw': ['[CLS]', 'this', 'table', 'can', 'be', 'completed', 'with', 'the', 'marginal', 'totals', 'of', 'the', 'two', 'variables', '[SEP]']}\n",
      "==========START consuming question==========\n",
      "==========END consuming question==========\n",
      "[\n",
      "    {\n",
      "        \"missing_index\": [\n",
      "            1\n",
      "        ],\n",
      "        \"masked_index\": [\n",
      "            2\n",
      "        ],\n",
      "        \"distance\": 0.19909927248954773\n",
      "    },\n",
      "    {\n",
      "        \"missing_index\": [\n",
      "            1\n",
      "        ],\n",
      "        \"masked_index\": [\n",
      "            3\n",
      "        ],\n",
      "        \"distance\": 0.0038276538252830505\n",
      "    },\n",
      "    {\n",
      "        \"missing_index\": [\n",
      "            1\n",
      "        ],\n",
      "        \"masked_index\": [\n",
      "            4\n",
      "        ],\n",
      "        \"distance\": 0.007673277053982019\n",
      "    },\n",
      "    {\n",
      "        \"missing_index\": [\n",
      "            1\n",
      "        ],\n",
      "        \"masked_index\": [\n",
      "            5\n",
      "        ],\n",
      "        \"distance\": 0.02930416166782379\n",
      "    }\n",
      "]\n"
     ]
    }
   ],
   "source": [
    "def sentence_pipeline(sentence, \n",
    "                      transform=default_transform,\n",
    "                      measure=kl_divergence_dist):\n",
    "    # End-to-end demo: tokenize one raw sentence, enumerate all\n",
    "    # single-token question pairs, score only the FIRST batch (note the\n",
    "    # `break` below), and pretty-print the resulting records.\n",
    "    sent = transform(sentence)\n",
    "    print(sent)\n",
    "    producer = QuestionPairIterable(sent)\n",
    "    consumer = QuestionPairConsumer(measure=measure) \n",
    "    dataloader = DataLoader(producer, batch_size=4, num_workers=0)\n",
    "    relation_list = []\n",
    "    for batch in dataloader:\n",
    "        distance = consumer.consume_question_pair(batch)\n",
    "        batch_size = get_batch_size(batch)\n",
    "        for index in range(0, batch_size):\n",
    "            relation = {}\n",
    "            # un-batch: slice every tensor at `index` to rebuild one record\n",
    "            for key, batched_tensor in batch.items():\n",
    "                relation[key] = batched_tensor[index]\n",
    "            relation_list.append({\n",
    "                \"missing_index\": relation[\"miss_id\"].tolist(),\n",
    "                \"masked_index\":relation[\"mask_id\"].tolist(),\n",
    "                \"distance\": float(distance[index])\n",
    "            })\n",
    "        break\n",
    "    # relation_list.sort(key=lambda x:x[\"distance\"], reverse=True)\n",
    "    print(json.dumps(relation_list[:5], indent=4))\n",
    "    \n",
    "sentence_pipeline(table)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 115,
   "id": "788d1dfb",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[([1, 2, 3, 0, 0, 0], [1, 1, 1, 0, 0, 0]), ([7, 8, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0]), ([10, 11, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0]), ([17, 18, 19, 20, 21, 22], [1, 1, 1, 1, 1, 1]), ([25, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0]), ([33, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0])]\n",
      "==========START consuming question==========\n",
      "==========END consuming question==========\n",
      "[\n",
      "    {\n",
      "        \"missing_index\": [\n",
      "            1,\n",
      "            2,\n",
      "            3,\n",
      "            0,\n",
      "            0,\n",
      "            0\n",
      "        ],\n",
      "        \"masked_index\": [\n",
      "            7,\n",
      "            8,\n",
      "            0,\n",
      "            0,\n",
      "            0,\n",
      "            0\n",
      "        ],\n",
      "        \"distance\": 0.04362180829048157\n",
      "    },\n",
      "    {\n",
      "        \"missing_index\": [\n",
      "            1,\n",
      "            2,\n",
      "            3,\n",
      "            0,\n",
      "            0,\n",
      "            0\n",
      "        ],\n",
      "        \"masked_index\": [\n",
      "            10,\n",
      "            11,\n",
      "            0,\n",
      "            0,\n",
      "            0,\n",
      "            0\n",
      "        ],\n",
      "        \"distance\": 0.0072735026478767395\n",
      "    },\n",
      "    {\n",
      "        \"missing_index\": [\n",
      "            1,\n",
      "            2,\n",
      "            3,\n",
      "            0,\n",
      "            0,\n",
      "            0\n",
      "        ],\n",
      "        \"masked_index\": [\n",
      "            17,\n",
      "            18,\n",
      "            19,\n",
      "            20,\n",
      "            21,\n",
      "            22\n",
      "        ],\n",
      "        \"distance\": 0.039240512996912\n",
      "    },\n",
      "    {\n",
      "        \"missing_index\": [\n",
      "            1,\n",
      "            2,\n",
      "            3,\n",
      "            0,\n",
      "            0,\n",
      "            0\n",
      "        ],\n",
      "        \"masked_index\": [\n",
      "            25,\n",
      "            0,\n",
      "            0,\n",
      "            0,\n",
      "            0,\n",
      "            0\n",
      "        ],\n",
      "        \"distance\": 0.011625521816313267\n",
      "    }\n",
      "]\n"
     ]
    }
   ],
   "source": [
    "def parsed_sentence_pipeline(sent, measure=kl_divergence_dist):\n",
    "    \"\"\"Score question pairs for an already-parsed sentence and print a sample.\n",
    "\n",
    "    Same batching/scoring flow as sentence_pipeline above, but builds pairs\n",
    "    with NounQuestionPairIterable (defined in an earlier cell; presumably\n",
    "    pairs noun chunks - confirm against its definition).\n",
    "    NOTE(review): only the first batch of 4 is processed (see `break`) -\n",
    "    this cell is a smoke test.\n",
    "    \"\"\"\n",
    "    producer = NounQuestionPairIterable(sent)\n",
    "    consumer = QuestionPairConsumer(measure=measure) \n",
    "    dataloader = DataLoader(producer, batch_size=4, num_workers=0)\n",
    "    relation_list = []\n",
    "    for batch in dataloader:\n",
    "        distance = consumer.consume_question_pair(batch)\n",
    "        batch_size = get_batch_size(batch)\n",
    "        for index in range(0, batch_size):\n",
    "            # Slice each batched tensor back into per-sample tensors.\n",
    "            relation = {}\n",
    "            for key, batched_tensor in batch.items():\n",
    "                relation[key] = batched_tensor[index]\n",
    "            relation_list.append({\n",
    "                \"missing_index\": relation[\"miss_id\"].tolist(),\n",
    "                \"masked_index\":relation[\"mask_id\"].tolist(),\n",
    "                \"distance\": float(distance[index])\n",
    "            })\n",
    "        break  # smoke test: stop after the first batch\n",
    "    # relation_list.sort(key=lambda x:x[\"distance\"], reverse=True)\n",
    "    print(json.dumps(relation_list[:5], indent=4))\n",
    "    \n",
    "parsed_sentence_pipeline(parsed_sentence[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "af3ca514",
   "metadata": {},
   "outputs": [],
   "source": [
    "class SaveManager:\n",
    "    \"\"\"Buffer scored relations and their source sentences, flushing both\n",
    "    to JSON-lines dump files at a fixed interval and recording progress\n",
    "    so an interrupted run can resume.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self,\n",
    "        dump_dir=DUMP_DIR,\n",
    "        counter=0,\n",
    "        log_interval=100,\n",
    "        save_interval=500):\n",
    "        self.sentence_dict = {}  # context_id -> raw token list\n",
    "        self.relation_list = []  # buffered relation dicts awaiting a flush\n",
    "        self.log_interval = log_interval\n",
    "        self.save_interval = save_interval\n",
    "        # Round the resumed counter down to the last completed save boundary.\n",
    "        self.counter = counter - counter % save_interval\n",
    "        self.dump_dir = dump_dir\n",
    "        self.progress_path = os.path.join(self.dump_dir, \"progress.log\")\n",
    "        self.rel_template = os.path.join(dump_dir, \"relation_list_cnt_{}.dump\")\n",
    "        self.stc_template = os.path.join(dump_dir, \"sentence_dict_cnt_{}.dump\")\n",
    "\n",
    "    def load_progress(self):\n",
    "        \"\"\"Return (file_id, stc_id) to resume from.\n",
    "\n",
    "        NOTE(review): resume is currently disabled - the early return below\n",
    "        always restarts from (0, 0). Delete it to re-enable the code path\n",
    "        that reads progress.log.\n",
    "        \"\"\"\n",
    "        return (0, 0)\n",
    "\n",
    "        if os.path.exists(self.progress_path):\n",
    "            with open(self.progress_path, \"r\") as p_log:\n",
    "                progress = json.load(p_log)\n",
    "                self.save_interval = progress[\"save_interval\"]\n",
    "                self.counter = progress[\"counter\"]\n",
    "                return (progress[\"file_id\"], progress[\"stc_id\"])\n",
    "        return (0, 0)\n",
    "\n",
    "    def dump_progress(self, file_id, stc_id):\n",
    "        \"\"\"Persist position and counters so load_progress() can resume.\"\"\"\n",
    "        progress = {\n",
    "            \"file_id\": file_id,\n",
    "            \"stc_id\": stc_id,\n",
    "            \"counter\": self.counter,\n",
    "            \"save_interval\": self.save_interval\n",
    "        }\n",
    "        with open(self.progress_path, \"w\") as p_log:\n",
    "            p_log.write(json.dumps(progress))\n",
    "        \n",
    "    def save_sentence_list(self):\n",
    "        \"\"\"Dump buffered sentences, sorted by context id, as JSON lines.\"\"\"\n",
    "        # BUGFIX: iterate .items() (bare dict iteration yields keys only,\n",
    "        # so the 2-tuple unpack raised) and build the *local* list -- the\n",
    "        # original appended to self.sentence_list, which never existed.\n",
    "        sentence_list = [\n",
    "            {\"id\": context_id, \"context\": raw_tokens}\n",
    "            for context_id, raw_tokens in self.sentence_dict.items()\n",
    "        ]\n",
    "        sentence_list.sort(key=lambda x: x[\"id\"])\n",
    "        save_path = self.stc_template.format(self.counter)\n",
    "        with open(save_path, \"w\") as f:\n",
    "            for sentence in sentence_list:\n",
    "                f.write(json.dumps(sentence) + \"\\n\")\n",
    "\n",
    "    def update_sentence(self, sentence, context_id):\n",
    "        \"\"\"Remember the raw tokens of `sentence` under `context_id`.\"\"\"\n",
    "        self.sentence_dict[context_id] = sentence[\"raw\"]\n",
    "    \n",
    "    def save_relation_list(self):\n",
    "        \"\"\"Dump buffered relations as JSON lines.\"\"\"\n",
    "        save_path = self.rel_template.format(self.counter)\n",
    "        with open(save_path, \"w\") as f:\n",
    "            for relation in self.relation_list:\n",
    "                f.write(json.dumps(relation) + \"\\n\")\n",
    "    \n",
    "    def update_relation(self, sample, distance, context_id):\n",
    "        \"\"\"Buffer one scored relation; log and flush at configured intervals.\"\"\"\n",
    "        self.relation_list.append({\n",
    "            \"context\": context_id,\n",
    "            \"missing_index\": sample[\"miss_id\"].tolist(),\n",
    "            \"masked_index\": sample[\"mask_id\"].tolist(),\n",
    "            \"distance\": float(distance)\n",
    "        })\n",
    "        self.counter += 1\n",
    "        if self.counter % self.log_interval == 0:\n",
    "            print(\"Got example count: \", self.counter)\n",
    "        if self.counter % self.save_interval == 0:\n",
    "            print(\"Save examples.\")\n",
    "            # BUGFIX: these were bare calls (NameError at flush time).\n",
    "            self.save_relation_list()\n",
    "            self.save_sentence_list()\n",
    "            self.relation_list = []\n",
    "            self.sentence_dict = {}\n",
    "\n",
    "    def update_relation_batched(self, batch, distance, context_id):\n",
    "        \"\"\"Unbatch a dict of batched tensors and buffer each sample.\"\"\"\n",
    "        batch_size = get_batch_size(batch)\n",
    "        for index in range(0, batch_size):\n",
    "            relation = {key: batched_tensor[index]\n",
    "                        for key, batched_tensor in batch.items()}\n",
    "            self.update_relation(relation, distance[index], context_id)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bff0f8dc",
   "metadata": {},
   "outputs": [],
   "source": [
    "def main():\n",
    "    \"\"\"Stream wiki sentences, score their question pairs, and dump results.\n",
    "\n",
    "    Resumes from the position recorded by SaveManager (currently always\n",
    "    (0, 0) - resume is disabled in load_progress).\n",
    "    NOTE(review): both `break` statements limit this to the first batch of\n",
    "    the first sentence - a smoke test. Remove them for the full run.\n",
    "    \"\"\"\n",
    "    # Removed unused locals from the original (sentence_list, relation_list,\n",
    "    # log_interval, stc_relation_list) - none were read anywhere.\n",
    "    save_interval = 500\n",
    "\n",
    "    save_manager = SaveManager(save_interval=save_interval)\n",
    "    last_file_id, last_stc_id = save_manager.load_progress()\n",
    "   \n",
    "    sentence_dataset = SentenceIterable(file_id=last_file_id, stc_id=last_stc_id)\n",
    "    consumer = QuestionPairConsumer()\n",
    "\n",
    "    for sentence, file_id, stc_id in sentence_dataset:\n",
    "        # Globally unique sentence id; assumes < 50000 sentences per file.\n",
    "        context_id = file_id * 50000 + stc_id\n",
    "        question_pair_dataset = QuestionPairIterable(sentence)\n",
    "        dataloader = DataLoader(question_pair_dataset, batch_size=32, num_workers=0)\n",
    "        for sample_batched in dataloader:\n",
    "            distance = consumer.consume_question_pair(sample_batched)\n",
    "            save_manager.update_sentence(sentence, context_id)\n",
    "            save_manager.update_relation_batched(sample_batched, distance, context_id)\n",
    "            save_manager.dump_progress(file_id, stc_id)\n",
    "            break\n",
    "        break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "66b39e02",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Smoke-test run: main() stops after its first batch (see the breaks).\n",
    "main()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "daaab200",
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): the main() defined above returns None, so the subscripts\n",
    "# below raise TypeError - this cell appears to target an older main() that\n",
    "# returned a sample dict; update or remove it.\n",
    "sample = main()\n",
    "label = sample[\"label\"]\n",
    "unmasked = sample[\"unmasked\"]\n",
    "masked = sample[\"masked\"]\n",
    "print(sample)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a3a0771c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): this unpacks three values from consume_question_pair, but\n",
    "# the pipeline cells above use its return value as a single distance batch\n",
    "# - confirm which consumer version is current.\n",
    "consumer = QuestionPairConsumer()\n",
    "g_logits, u_logits, m_logits = consumer.consume_question_pair(sample)\n",
    "print(g_logits, u_logits, m_logits)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "50882aa5",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Fresh tokenizer/model pair for the fine-tuning scratch cells below.\n",
    "tokenizer_to_train = BertTokenizer.from_pretrained('bert-base-uncased')\n",
    "model_to_train = BertModel.from_pretrained('bert-base-uncased')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "26d43028",
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): `hamlet` is defined in an earlier cell (not visible here).\n",
    "inputs = tokenizer_to_train(hamlet, return_tensors=\"pt\")\n",
    "outputs = model_to_train(**inputs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d97ded4a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Inspect which fields the model output exposes.\n",
    "print(outputs.keys())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8ac6b7ed",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scratch cell: fine-tuning setup for BertForSequenceClassification.\n",
    "# NOTE(review): imports here duplicate/extend the top import cell; move\n",
    "# them there if this cell graduates from scratch status.\n",
    "import torch\n",
    "from transformers import BertForSequenceClassification\n",
    "model = BertForSequenceClassification.from_pretrained('bert-base-uncased')\n",
    "model.train()\n",
    "from transformers import AdamW\n",
    "optimizer = AdamW(model.parameters(), lr=1e-5)\n",
    "from transformers import BertTokenizer\n",
    "tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n",
    "text_batch = [\"I love Pixar.\", \"I don't care for Pixar.\"]\n",
    "encoding = tokenizer(text_batch, return_tensors='pt', padding=True, truncation=True)\n",
    "input_ids = encoding['input_ids']\n",
    "attention_mask = encoding['attention_mask']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8ef37e6d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): unsqueeze(0) gives labels shape (1, 2); the documented\n",
    "# form for single-label classification is (batch_size,) = (2,). The loss\n",
    "# likely still computes because labels are flattened internally, but\n",
    "# confirm the intent before relying on this cell.\n",
    "labels = torch.tensor([1,0]).unsqueeze(0)\n",
    "outputs = model(input_ids, attention_mask=attention_mask, labels=labels)\n",
    "loss = outputs.loss\n",
    "loss.backward()\n",
    "optimizer.step()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9ffaccff",
   "metadata": {},
   "outputs": [],
   "source": [
    "import nltk\n",
    "from nltk.tokenize import word_tokenize\n",
    "from nltk.tag import pos_tag"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c6501f52",
   "metadata": {},
   "outputs": [],
   "source": [
    "import spacy\n",
    "# Load the small English pipeline without the ner/lemmatizer components\n",
    "# (only parsing is needed for the noun-chunk pass below).\n",
    "nlp = spacy.load(\"en_core_web_sm\", exclude=['ner','lemmatizer'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e800d8a5",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Show the active pipeline components, parse `hysteria` (defined in an\n",
    "# earlier cell), and print the token span of each noun chunk.\n",
    "print(nlp.pipe_names)\n",
    "print(nlp.analyze_pipes(pretty=True))\n",
    "doc = nlp(hysteria)\n",
    "print(doc)\n",
    "# BUGFIX: the loop variable was named `np`, shadowing the numpy alias\n",
    "# imported in the notebook's first cell.\n",
    "for chunk in doc.noun_chunks:\n",
    "    print(chunk.start, chunk.end)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "39ac7c63",
   "metadata": {},
   "outputs": [],
   "source": [
    "import re\n",
    "# Rewrite filelist.txt so each line keeps only the path relative to\n",
    "# wikipedia/text/.\n",
    "pattern = re.compile(\"wikipedia/text/(.*)$\")\n",
    "with open(\"filelist.txt\") as f, open(\"new_filelist.txt\", \"w\") as fw:\n",
    "    for line in f:\n",
    "        match = pattern.search(line)\n",
    "        # BUGFIX: skip non-matching lines instead of crashing with\n",
    "        # AttributeError on `.group(1)` of None.\n",
    "        if match:\n",
    "            fw.write(match.group(1) + \"\\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "8b1b1d1e",
   "metadata": {},
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'global_tokenizer' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-1-0405e7e85dda>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mtokenized\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mglobal_tokenizer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mhysteria\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"input_ids\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m      2\u001b[0m \u001b[0mraw\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mglobal_tokenizer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconvert_ids_to_tokens\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtokenized\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      3\u001b[0m \u001b[0mids\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mglobal_tokenizer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconvert_tokens_to_ids\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mraw\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      4\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mraw\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      5\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mids\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mNameError\u001b[0m: name 'global_tokenizer' is not defined"
     ]
    }
   ],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ebd761f0",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
