{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "from transformers import BertTokenizer, BertConfig\n",
    "from transformers import BertModel\n",
    "import nltk\n",
    "import numpy as np\n",
    "import torch\n",
    "import re"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[PhysicalDevice(name='/physical_device:CPU:0', device_type='CPU'),\n",
       " PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tf.config.list_physical_devices()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "metadata": {},
   "outputs": [],
   "source": [
    "def is_available_in_pos_list(dict_list, begin_pos, end_pos):\n",
    "    for entry in dict_list:\n",
    "        if((entry[\"start\"]<=begin_pos and entry[\"end\"]>begin_pos) or ((entry[\"start\"]<end_pos and entry[\"end\"]>=end_pos))):\n",
    "            return True\n",
    "    \n",
    "    return False\n",
    "'''\n",
    "def get_bert_token_positions(input_text,token_list,start_from_pos=0,):\n",
    "    \n",
    "    pos_list = []                    \n",
    "    \n",
    "    name_to_match = input_text.lower().replace(\" \",\"\")\n",
    "    remaining_name = input_text.lower().replace(\" \",\"\")\n",
    "    \n",
    "    name = \"\"\n",
    "    count = start_from_pos\n",
    "\n",
    "    for entry in token_list[start_from_pos:]:\n",
    "        if(remaining_name.startswith(entry.strip(\"##\").lower())):\n",
    "            pos_list.append(count)\n",
    "            remaining_name = remaining_name[len(entry.strip(\"##\").lower()):]\n",
    "            name = name + entry.strip(\"##\").lower()\n",
    "            if(name == name_to_match):\n",
    "                break\n",
    "        else:\n",
    "            pos_list = []\n",
    "            name = \"\"\n",
    "            remaining_name = name_to_match\n",
    "            if(remaining_name.startswith(entry.strip(\"##\").lower())):                                 \n",
    "                pos_list.append(count)                                                                \n",
    "                remaining_name = remaining_name[len(entry.strip(\"##\").lower()):]\n",
    "                name = name + entry.strip(\"##\").lower()    \n",
    "                if(name == name_to_match):                                   \n",
    "                    break\n",
    "\n",
    "        count = count + 1\n",
    "\n",
    "    return pos_list\n",
    "'''\n",
    "def get_bert_token_positions(input_text,token_list,start_from_pos=0,prior_partial_word=\"\"):\n",
    "    partial_word = \"\"\n",
    "\n",
    "    pos_list = []                    \n",
    "    \n",
    "    if(prior_partial_word!=\"\"):\n",
    "        input_text = prior_partial_word + input_text \n",
    "\n",
    "    name_to_match = input_text.lower().replace(\" \",\"\")\n",
    "    remaining_name = input_text.lower().replace(\" \",\"\")\n",
    "    \n",
    "    name = \"\"\n",
    "    count = start_from_pos\n",
    "\n",
    "    for entry in token_list[start_from_pos:]:\n",
    "        entry_text = entry.strip(\"##\").lower()\n",
    "        if(entry_text.startswith(remaining_name) and (entry_text != remaining_name)):\n",
    "            partial_word = remaining_name\n",
    "            pos_list.append(count)\n",
    "            (\"Appended \",count)\n",
    "            break\n",
    "             \n",
    "        if(remaining_name.startswith(entry_text)):\n",
    "            (\"Appended \",count)\n",
    "            pos_list.append(count)\n",
    "            remaining_name = remaining_name[len(entry_text):]\n",
    "            name = name + entry_text\n",
    "            if(name == name_to_match):\n",
    "                break\n",
    "        else:\n",
    "            pos_list = []\n",
    "            name = \"\"\n",
    "            remaining_name = name_to_match\n",
    "            if(remaining_name.startswith(entry.strip(\"##\").lower())):                                 \n",
    "                (\"Appended \",count)\n",
    "                pos_list.append(count)                                                                \n",
    "                remaining_name = remaining_name[len(entry.strip(\"##\").lower()):]\n",
    "                name = name + entry.strip(\"##\").lower()    \n",
    "                if(name == name_to_match):                                   \n",
    "                    break\n",
    "\n",
    "        count = count + 1\n",
    "\n",
    "    return [pos_list,partial_word]\n",
    "\n",
    "def process_string(string_input, entity_list, bert_model_path='C:/Users/itsma/Documents/BERT_models/NCBI_BERT_pubmed_mimic_uncased_L-12_H-768_A-12'):\n",
    "    \"\"\"Sentence-split a clinical note, BERT-tokenize every sentence and build\n",
    "    per-wordpiece entity labels.\n",
    "\n",
    "    string_input : raw note text.\n",
    "    entity_list  : list of span lists (e.g. [event_spans, timex_spans]); each span\n",
    "                   is a dict with integer character offsets \"start\"/\"end\". A\n",
    "                   wordpiece overlapping a span from entity_list[i] is labeled i+1;\n",
    "                   all other positions (including [CLS]/[SEP]) stay 0.\n",
    "    bert_model_path : tokenizer directory. The default keeps the old hard-coded\n",
    "                   absolute local path for backward compatibility; pass your own.\n",
    "\n",
    "    Returns one dict per sentence with keys \"sentence\", \"encodings\",\n",
    "    \"tokens\" and \"labels\".\n",
    "    \"\"\"\n",
    "    # Keep admission/discharge dates on the heading line so the sentence\n",
    "    # tokenizer does not split heading and date apart.\n",
    "    string_input = re.sub(r'Admission Date :\\n([0-9/ ]*)\\n', 'Admission Date : \\g<1>\\n', string_input)\n",
    "    string_input = re.sub(r'Discharge Date :\\n([0-9/ ]*)\\n', 'Discharge Date : \\g<1>\\n', string_input)\n",
    "\n",
    "    tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\n",
    "    init_sentences = tokenizer.tokenize(string_input)\n",
    "\n",
    "    # punkt keeps embedded newlines; split them into physical lines.\n",
    "    sentences = []\n",
    "    for entry in init_sentences:\n",
    "        sentences.extend(entry.split(\"\\n\"))\n",
    "\n",
    "    bert_tokenizer = BertTokenizer.from_pretrained(bert_model_path)\n",
    "\n",
    "    # Running character offset into string_input, so each word's exact span\n",
    "    # can be recovered even when the same word occurs repeatedly.\n",
    "    positions_covered = 0\n",
    "    sentence_list = []\n",
    "\n",
    "    for sentence in sentences:\n",
    "        new_dict_sentence = {}\n",
    "        new_dict_sentence[\"sentence\"] = sentence\n",
    "\n",
    "        encodings = bert_tokenizer.encode(sentence, add_special_tokens = True)\n",
    "        new_dict_sentence[\"encodings\"] = encodings\n",
    "        bert_tokens = bert_tokenizer.convert_ids_to_tokens(encodings)\n",
    "        new_dict_sentence[\"tokens\"] = bert_tokens\n",
    "\n",
    "        start_pos = 0\n",
    "        prior_partial_word = ''\n",
    "        label_list = [0] * len(encodings)\n",
    "\n",
    "        # The original computed nltk.pos_tag here but only ever used the raw\n",
    "        # word (token[0]), so plain word tokens are sufficient.\n",
    "        for current_word in nltk.word_tokenize(sentence):\n",
    "            [bert_token_positions, partial_word] = get_bert_token_positions(current_word,bert_tokens,start_pos,prior_partial_word)\n",
    "\n",
    "            if(len(bert_token_positions)==0):\n",
    "                prior_partial_word = \"\"\n",
    "                continue\n",
    "            if(partial_word != \"\"):\n",
    "                # The last wordpiece spills into the next word; revisit it.\n",
    "                prior_partial_word = partial_word\n",
    "                start_pos = bert_token_positions[-1]\n",
    "            else:\n",
    "                prior_partial_word = \"\"\n",
    "                start_pos = bert_token_positions[-1] + 1\n",
    "\n",
    "            # Character span of this word in the original note.\n",
    "            # NOTE(review): find() returns -1 if nltk altered the word (e.g.\n",
    "            # quote normalisation) - behaviour unchanged from the original.\n",
    "            token_position = string_input.find(current_word, positions_covered)\n",
    "            positions_covered = token_position + len(current_word)\n",
    "            begin_pos = token_position\n",
    "            end_pos = positions_covered\n",
    "\n",
    "            entity_index = 1\n",
    "            for entity in entity_list:\n",
    "                if(is_available_in_pos_list(entity, begin_pos, end_pos)):\n",
    "                    for entry in bert_token_positions:\n",
    "                        label_list[entry] = entity_index\n",
    "                entity_index = entity_index + 1\n",
    "\n",
    "        new_dict_sentence[\"labels\"] = label_list\n",
    "        sentence_list.append(new_dict_sentence)\n",
    "\n",
    "    return sentence_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {},
   "outputs": [],
   "source": [
    "class DischargeNote():\n",
    "    \"\"\"One i2b2-style XML discharge note.\n",
    "\n",
    "    Extracts the note text plus EVENT/TIMEX3 spans, labels BERT tokens via\n",
    "    process_string, and groups sentences into windows of at most 512 BERT\n",
    "    tokens (padding_length neighbouring sentences on each side).\n",
    "    \"\"\"\n",
    "    def __init__(self, root,padding_length=0):\n",
    "        # root: ElementTree root with TEXT and TAGS children.\n",
    "        self.xml_root = root\n",
    "        # Neighbouring sentences appended on each side of a window.\n",
    "        self.padding_length = padding_length\n",
    "\n",
    "    def _int_spans(self, tag_attribs):\n",
    "        # Convert tag attribute dicts to {\"start\": int, \"end\": int} spans,\n",
    "        # sorted by start offset.\n",
    "        spans = [{\"start\": int(sub[\"start\"]), \"end\": int(sub[\"end\"])} for sub in tag_attribs]\n",
    "        return sorted(spans, key = lambda i: i['start'])\n",
    "\n",
    "    def process_note(self):\n",
    "        \"\"\"Process the note; returns [encoding_list, token_list, label_list]\n",
    "        with one (possibly padded) window per sentence.\"\"\"\n",
    "        root = self.xml_root\n",
    "        text = root.find('TEXT').text\n",
    "\n",
    "        tag_section = root.find('TAGS')\n",
    "        event_list = []\n",
    "        timex_list = []\n",
    "        tlink_list = []\n",
    "        sectime_list = []\n",
    "        for child in tag_section:\n",
    "            if(child.tag=='EVENT'):\n",
    "                event_list.append(child.attrib)\n",
    "            elif(child.tag=='TIMEX3'):\n",
    "                timex_list.append(child.attrib)\n",
    "            elif(child.tag=='TLINK'):\n",
    "                tlink_list.append(child.attrib)\n",
    "            elif(child.tag=='SECTIME'):\n",
    "                sectime_list.append(child.attrib)\n",
    "\n",
    "        # Order matters: EVENT spans get label 1, TIMEX3 spans label 2.\n",
    "        entity_list = [self._int_spans(event_list), self._int_spans(timex_list)]\n",
    "\n",
    "        self.processed_text = process_string(text,entity_list)\n",
    "\n",
    "        sentences_length = len(self.processed_text)\n",
    "        encoding_list = []\n",
    "        label_list = []\n",
    "        token_list = []\n",
    "\n",
    "        for sentence_index in range(sentences_length):\n",
    "            padding_length = self.padding_length\n",
    "\n",
    "            encodings = []\n",
    "            labels = []\n",
    "            tokens = []\n",
    "            # Shrink the padding until the window fits in 512 BERT tokens.\n",
    "            while(True):\n",
    "                encodings = []\n",
    "                labels = []\n",
    "                tokens = []\n",
    "\n",
    "                begin_index = max(0,sentence_index-padding_length)\n",
    "                # BUGFIX: clamp to the last valid index (was sentences_length),\n",
    "                # which mis-counted last_index for windows at the end of the\n",
    "                # note and stripped their trailing [SEP].\n",
    "                end_index = min(sentences_length-1,sentence_index+padding_length)\n",
    "                current_index = 1\n",
    "                last_index = end_index - begin_index + 1\n",
    "\n",
    "                if(begin_index==end_index):\n",
    "                    entry = self.processed_text[begin_index]\n",
    "                    encodings.extend(entry['encodings'])\n",
    "                    tokens.extend(entry['tokens'])\n",
    "                    labels.extend(entry['labels'])\n",
    "                else:\n",
    "                    for entry in self.processed_text[begin_index:end_index+1]:\n",
    "                        if(current_index==1):\n",
    "                            # First sentence keeps [CLS], drops trailing [SEP].\n",
    "                            encodings.extend(entry['encodings'][:-1])\n",
    "                            tokens.extend(entry['tokens'][:-1])\n",
    "                            labels.extend(entry['labels'][:-1])\n",
    "                        elif(current_index==last_index):\n",
    "                            # Last sentence keeps [SEP], drops leading [CLS].\n",
    "                            encodings.extend(entry['encodings'][1:])\n",
    "                            tokens.extend(entry['tokens'][1:])\n",
    "                            labels.extend(entry['labels'][1:])\n",
    "                        else:\n",
    "                            # Middle sentences drop both special tokens.\n",
    "                            encodings.extend(entry['encodings'][1:-1])\n",
    "                            tokens.extend(entry['tokens'][1:-1])\n",
    "                            labels.extend(entry['labels'][1:-1])\n",
    "                        current_index = current_index + 1\n",
    "\n",
    "                if(len(encodings)<=512):\n",
    "                    break\n",
    "                if(padding_length==0):\n",
    "                    # BUGFIX: a single sentence longer than 512 tokens used to\n",
    "                    # loop here forever; keep the oversized window instead.\n",
    "                    break\n",
    "                padding_length = padding_length - 1\n",
    "\n",
    "            encoding_list.append(encodings)\n",
    "            token_list.append(tokens)\n",
    "            label_list.append(labels)\n",
    "\n",
    "        return [encoding_list,token_list,label_list]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import xml.etree.ElementTree as ET"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[{'start': 1, 'end': 10}, {'start': 29, 'end': 38}, {'start': 128, 'end': 140}, {'start': 161, 'end': 170}, {'start': 176, 'end': 200}, {'start': 212, 'end': 218}, {'start': 223, 'end': 231}, {'start': 241, 'end': 266}, {'start': 277, 'end': 286}, {'start': 302, 'end': 324}, {'start': 367, 'end': 379}, {'start': 423, 'end': 441}, {'start': 449, 'end': 474}, {'start': 493, 'end': 518}, {'start': 549, 'end': 566}, {'start': 579, 'end': 588}, {'start': 592, 'end': 630}, {'start': 650, 'end': 677}, {'start': 702, 'end': 714}, {'start': 719, 'end': 727}, {'start': 731, 'end': 734}, {'start': 759, 'end': 765}, {'start': 770, 'end': 778}, {'start': 790, 'end': 814}, {'start': 848, 'end': 853}, {'start': 888, 'end': 900}, {'start': 946, 'end': 954}, {'start': 958, 'end': 972}, {'start': 995, 'end': 1000}, {'start': 1010, 'end': 1015}, {'start': 1036, 'end': 1044}, {'start': 1058, 'end': 1064}, {'start': 1070, 'end': 1078}, {'start': 1082, 'end': 1093}, {'start': 1115, 'end': 1124}, {'start': 1138, 'end': 1179}, {'start': 1185, 'end': 1227}, {'start': 1238, 'end': 1255}, {'start': 1281, 'end': 1299}, {'start': 1302, 'end': 1326}, {'start': 1331, 'end': 1348}, {'start': 1374, 'end': 1380}, {'start': 1383, 'end': 1394}, {'start': 1399, 'end': 1414}, {'start': 1428, 'end': 1432}, {'start': 1466, 'end': 1495}, {'start': 1500, 'end': 1518}, {'start': 1544, 'end': 1573}, {'start': 1621, 'end': 1628}, {'start': 1633, 'end': 1643}, {'start': 1661, 'end': 1673}, {'start': 1685, 'end': 1698}, {'start': 1702, 'end': 1736}, {'start': 1803, 'end': 1811}, {'start': 1818, 'end': 1851}, {'start': 1856, 'end': 1859}, {'start': 1905, 'end': 1913}, {'start': 1918, 'end': 1931}, {'start': 1953, 'end': 1965}, {'start': 1992, 'end': 2000}, {'start': 2004, 'end': 2016}, {'start': 2060, 'end': 2069}, {'start': 2091, 'end': 2099}, {'start': 2115, 'end': 2128}, {'start': 2132, 'end': 2152}, {'start': 2195, 'end': 2209}, {'start': 2231, 'end': 2245}, {'start': 2260, 'end': 2269}, {'start': 
2272, 'end': 2291}, {'start': 2315, 'end': 2319}, {'start': 2324, 'end': 2332}, {'start': 2337, 'end': 2349}, {'start': 2353, 'end': 2365}, {'start': 2368, 'end': 2381}, {'start': 2450, 'end': 2470}, {'start': 2476, 'end': 2483}, {'start': 2496, 'end': 2504}], [{'start': 18, 'end': 28}, {'start': 46, 'end': 56}, {'start': 145, 'end': 154}, {'start': 290, 'end': 294}, {'start': 524, 'end': 533}, {'start': 859, 'end': 873}, {'start': 920, 'end': 929}, {'start': 1099, 'end': 1104}, {'start': 1259, 'end': 1268}, {'start': 1577, 'end': 1581}, {'start': 1647, 'end': 1656}, {'start': 2022, 'end': 2044}, {'start': 2156, 'end': 2163}, {'start': 2249, 'end': 2271}, {'start': 2400, 'end': 2405}]]\n",
      "['[CLS]', '[SEP]']\n",
      "['[CLS]', 'admission', 'date', ':', '09', '/', '29', '/', '1993', '[SEP]']\n",
      "1\n",
      "Admission\n",
      "1\n",
      "10\n",
      "[1]\n",
      "True\n",
      "2\n",
      "Date\n",
      "11\n",
      "15\n",
      "[1, 2]\n",
      "3\n",
      ":\n",
      "16\n",
      "17\n",
      "[2, 3]\n",
      "8\n",
      "09/29/1993\n",
      "18\n",
      "28\n",
      "[3, 4, 5, 6, 7, 8]\n",
      "True\n",
      "['[CLS]', 'discharge', 'date', ':', '10', '/', '04', '/', '1993', '[SEP]']\n",
      "1\n",
      "Discharge\n",
      "29\n",
      "38\n",
      "[1]\n",
      "True\n",
      "2\n",
      "Date\n",
      "39\n",
      "43\n",
      "[1, 2]\n",
      "3\n",
      ":\n",
      "44\n",
      "45\n",
      "[2, 3]\n",
      "8\n",
      "10/04/1993\n",
      "46\n",
      "56\n",
      "[3, 4, 5, 6, 7, 8]\n",
      "True\n",
      "['[CLS]', 'history', 'of', 'present', 'illness', ':', '[SEP]']\n",
      "1\n",
      "HISTORY\n",
      "57\n",
      "64\n",
      "[1]\n",
      "2\n",
      "OF\n",
      "65\n",
      "67\n",
      "[1, 2]\n",
      "3\n",
      "PRESENT\n",
      "68\n",
      "75\n",
      "[2, 3]\n",
      "4\n",
      "ILLNESS\n",
      "76\n",
      "83\n",
      "[3, 4]\n",
      "5\n",
      ":\n",
      "84\n",
      "85\n",
      "[4, 5]\n",
      "['[CLS]', 'the', 'patient', 'is', 'a', '28', '-', 'year', '-', 'old', 'woman', 'who', 'is', 'hiv', 'positive', 'for', 'two', 'years', '.', '[SEP]']\n",
      "1\n",
      "The\n",
      "86\n",
      "89\n",
      "[1]\n",
      "2\n",
      "patient\n",
      "90\n",
      "97\n",
      "[1, 2]\n",
      "3\n",
      "is\n",
      "98\n",
      "100\n",
      "[2, 3]\n",
      "4\n",
      "a\n",
      "101\n",
      "102\n",
      "[3, 4]\n",
      "9\n",
      "28-year-old\n",
      "103\n",
      "114\n",
      "[4, 5, 6, 7, 8, 9]\n",
      "10\n",
      "woman\n",
      "115\n",
      "120\n",
      "[9, 10]\n",
      "11\n",
      "who\n",
      "121\n",
      "124\n",
      "[10, 11]\n",
      "12\n",
      "is\n",
      "125\n",
      "127\n",
      "[11, 12]\n",
      "13\n",
      "HIV\n",
      "128\n",
      "131\n",
      "[12, 13]\n",
      "True\n",
      "14\n",
      "positive\n",
      "132\n",
      "140\n",
      "[13, 14]\n",
      "True\n",
      "15\n",
      "for\n",
      "141\n",
      "144\n",
      "[14, 15]\n",
      "16\n",
      "two\n",
      "145\n",
      "148\n",
      "[15, 16]\n",
      "True\n",
      "17\n",
      "years\n",
      "149\n",
      "154\n",
      "[16, 17]\n",
      "True\n",
      "18\n",
      ".\n",
      "155\n",
      "156\n",
      "[17, 18]\n",
      "['[CLS]', 'she', 'presented', 'with', 'left', 'upper', 'quadrant', 'pain', 'as', 'well', 'as', 'nausea', 'and', 'vomiting', 'which', 'is', 'a', 'long', '-', 'standing', 'complaint', '.', '[SEP]']\n",
      "1\n",
      "She\n",
      "157\n",
      "160\n",
      "[1]\n",
      "2\n",
      "presented\n",
      "161\n",
      "170\n",
      "[1, 2]\n",
      "True\n",
      "3\n",
      "with\n",
      "171\n",
      "175\n",
      "[2, 3]\n",
      "4\n",
      "left\n",
      "176\n",
      "180\n",
      "[3, 4]\n",
      "True\n",
      "5\n",
      "upper\n",
      "181\n",
      "186\n",
      "[4, 5]\n",
      "True\n",
      "6\n",
      "quadrant\n",
      "187\n",
      "195\n",
      "[5, 6]\n",
      "True\n",
      "7\n",
      "pain\n",
      "196\n",
      "200\n",
      "[6, 7]\n",
      "True\n",
      "8\n",
      "as\n",
      "201\n",
      "203\n",
      "[7, 8]\n",
      "9\n",
      "well\n",
      "204\n",
      "208\n",
      "[8, 9]\n",
      "10\n",
      "as\n",
      "209\n",
      "211\n",
      "[9, 10]\n",
      "11\n",
      "nausea\n",
      "212\n",
      "218\n",
      "[10, 11]\n",
      "True\n",
      "12\n",
      "and\n",
      "219\n",
      "222\n",
      "[11, 12]\n",
      "13\n",
      "vomiting\n",
      "223\n",
      "231\n",
      "[12, 13]\n",
      "True\n",
      "14\n",
      "which\n",
      "232\n",
      "237\n",
      "[13, 14]\n",
      "15\n",
      "is\n",
      "238\n",
      "240\n",
      "[14, 15]\n",
      "16\n",
      "a\n",
      "241\n",
      "242\n",
      "[15, 16]\n",
      "True\n",
      "19\n",
      "long-standing\n",
      "243\n",
      "256\n",
      "[16, 17, 18, 19]\n",
      "True\n",
      "20\n",
      "complaint\n",
      "257\n",
      "266\n",
      "[19, 20]\n",
      "True\n",
      "21\n",
      ".\n",
      "267\n",
      "268\n",
      "[20, 21]\n",
      "['[CLS]', 'she', 'was', 'diagnosed', 'in', '1991', 'during', 'the', 'birth', 'of', 'her', 'child', '.', '[SEP]']\n",
      "1\n",
      "She\n",
      "269\n",
      "272\n",
      "[1]\n",
      "2\n",
      "was\n",
      "273\n",
      "276\n",
      "[1, 2]\n",
      "3\n",
      "diagnosed\n",
      "277\n",
      "286\n",
      "[2, 3]\n",
      "True\n",
      "4\n",
      "in\n",
      "287\n",
      "289\n",
      "[3, 4]\n",
      "5\n",
      "1991\n",
      "290\n",
      "294\n",
      "[4, 5]\n",
      "True\n",
      "6\n",
      "during\n",
      "295\n",
      "301\n",
      "[5, 6]\n",
      "7\n",
      "the\n",
      "302\n",
      "305\n",
      "[6, 7]\n",
      "True\n",
      "8\n",
      "birth\n",
      "306\n",
      "311\n",
      "[7, 8]\n",
      "True\n",
      "9\n",
      "of\n",
      "312\n",
      "314\n",
      "[8, 9]\n",
      "True\n",
      "10\n",
      "her\n",
      "315\n",
      "318\n",
      "[9, 10]\n",
      "True\n",
      "11\n",
      "child\n",
      "319\n",
      "324\n",
      "[10, 11]\n",
      "True\n",
      "12\n",
      ".\n",
      "325\n",
      "326\n",
      "[11, 12]\n",
      "['[CLS]', 'she', 'claims', 'she', 'does', 'not', 'know', 'why', 'she', 'is', 'hiv', 'positive', '.', '[SEP]']\n",
      "1\n",
      "She\n",
      "327\n",
      "330\n",
      "[1]\n",
      "2\n",
      "claims\n",
      "331\n",
      "337\n",
      "[1, 2]\n",
      "3\n",
      "she\n",
      "338\n",
      "341\n",
      "[2, 3]\n",
      "4\n",
      "does\n",
      "342\n",
      "346\n",
      "[3, 4]\n",
      "5\n",
      "not\n",
      "347\n",
      "350\n",
      "[4, 5]\n",
      "6\n",
      "know\n",
      "351\n",
      "355\n",
      "[5, 6]\n",
      "7\n",
      "why\n",
      "356\n",
      "359\n",
      "[6, 7]\n",
      "8\n",
      "she\n",
      "360\n",
      "363\n",
      "[7, 8]\n",
      "9\n",
      "is\n",
      "364\n",
      "366\n",
      "[8, 9]\n",
      "10\n",
      "HIV\n",
      "367\n",
      "370\n",
      "[9, 10]\n",
      "True\n",
      "11\n",
      "positive\n",
      "371\n",
      "379\n",
      "[10, 11]\n",
      "True\n",
      "12\n",
      ".\n",
      "380\n",
      "381\n",
      "[11, 12]\n",
      "['[CLS]', 'she', 'is', 'from', 'maryland', ',', 'apparently', 'had', 'no', 'blood', 'trans', '##fusion', '##s', 'before', 'the', 'birth', 'of', 'her', 'children', 'so', 'it', 'is', 'presumed', 'heterosexual', 'transmission', '.', '[SEP]']\n",
      "1\n",
      "She\n",
      "382\n",
      "385\n",
      "[1]\n",
      "2\n",
      "is\n",
      "386\n",
      "388\n",
      "[1, 2]\n",
      "3\n",
      "from\n",
      "389\n",
      "393\n",
      "[2, 3]\n",
      "4\n",
      "Maryland\n",
      "394\n",
      "402\n",
      "[3, 4]\n",
      "5\n",
      ",\n",
      "403\n",
      "404\n",
      "[4, 5]\n",
      "6\n",
      "apparently\n",
      "405\n",
      "415\n",
      "[5, 6]\n",
      "7\n",
      "had\n",
      "416\n",
      "419\n",
      "[6, 7]\n",
      "8\n",
      "no\n",
      "420\n",
      "422\n",
      "[7, 8]\n",
      "9\n",
      "blood\n",
      "423\n",
      "428\n",
      "[8, 9]\n",
      "True\n",
      "12\n",
      "transfusions\n",
      "429\n",
      "441\n",
      "[9, 10, 11, 12]\n",
      "True\n",
      "13\n",
      "before\n",
      "442\n",
      "448\n",
      "[12, 13]\n",
      "14\n",
      "the\n",
      "449\n",
      "452\n",
      "[13, 14]\n",
      "True\n",
      "15\n",
      "birth\n",
      "453\n",
      "458\n",
      "[14, 15]\n",
      "True\n",
      "16\n",
      "of\n",
      "459\n",
      "461\n",
      "[15, 16]\n",
      "True\n",
      "17\n",
      "her\n",
      "462\n",
      "465\n",
      "[16, 17]\n",
      "True\n",
      "18\n",
      "children\n",
      "466\n",
      "474\n",
      "[17, 18]\n",
      "True\n",
      "19\n",
      "so\n",
      "475\n",
      "477\n",
      "[18, 19]\n",
      "20\n",
      "it\n",
      "478\n",
      "480\n",
      "[19, 20]\n",
      "21\n",
      "is\n",
      "481\n",
      "483\n",
      "[20, 21]\n",
      "22\n",
      "presumed\n",
      "484\n",
      "492\n",
      "[21, 22]\n",
      "23\n",
      "heterosexual\n",
      "493\n",
      "505\n",
      "[22, 23]\n",
      "True\n",
      "24\n",
      "transmission\n",
      "506\n",
      "518\n",
      "[23, 24]\n",
      "True\n",
      "25\n",
      ".\n",
      "519\n",
      "520\n",
      "[24, 25]\n",
      "['[CLS]', 'at', 'that', 'time', ',', 'she', 'also', 'had', 'cat', 'scratch', 'fever', 'and', 'she', 'had', 'res', '##ection', 'of', 'an', 'abs', '##ces', '##s', 'in', 'the', 'left', 'lower', 'ex', '##tre', '##mity', '.', '[SEP]']\n",
      "1\n",
      "At\n",
      "521\n",
      "523\n",
      "[1]\n",
      "2\n",
      "that\n",
      "524\n",
      "528\n",
      "[1, 2]\n",
      "True\n",
      "3\n",
      "time\n",
      "529\n",
      "533\n",
      "[2, 3]\n",
      "True\n",
      "4\n",
      ",\n",
      "534\n",
      "535\n",
      "[3, 4]\n",
      "5\n",
      "she\n",
      "536\n",
      "539\n",
      "[4, 5]\n",
      "6\n",
      "also\n",
      "540\n",
      "544\n",
      "[5, 6]\n",
      "7\n",
      "had\n",
      "545\n",
      "548\n",
      "[6, 7]\n",
      "8\n",
      "cat\n",
      "549\n",
      "552\n",
      "[7, 8]\n",
      "True\n",
      "9\n",
      "scratch\n",
      "553\n",
      "560\n",
      "[8, 9]\n",
      "True\n",
      "10\n",
      "fever\n",
      "561\n",
      "566\n",
      "[9, 10]\n",
      "True\n",
      "11\n",
      "and\n",
      "567\n",
      "570\n",
      "[10, 11]\n",
      "12\n",
      "she\n",
      "571\n",
      "574\n",
      "[11, 12]\n",
      "13\n",
      "had\n",
      "575\n",
      "578\n",
      "[12, 13]\n",
      "15\n",
      "resection\n",
      "579\n",
      "588\n",
      "[13, 14, 15]\n",
      "True\n",
      "16\n",
      "of\n",
      "589\n",
      "591\n",
      "[15, 16]\n",
      "17\n",
      "an\n",
      "592\n",
      "594\n",
      "[16, 17]\n",
      "True\n",
      "20\n",
      "abscess\n",
      "595\n",
      "602\n",
      "[17, 18, 19, 20]\n",
      "True\n",
      "21\n",
      "in\n",
      "603\n",
      "605\n",
      "[20, 21]\n",
      "True\n",
      "22\n",
      "the\n",
      "606\n",
      "609\n",
      "[21, 22]\n",
      "True\n",
      "23\n",
      "left\n",
      "610\n",
      "614\n",
      "[22, 23]\n",
      "True\n",
      "24\n",
      "lower\n",
      "615\n",
      "620\n",
      "[23, 24]\n",
      "True\n",
      "27\n",
      "extremity\n",
      "621\n",
      "630\n",
      "[24, 25, 26, 27]\n",
      "True\n",
      "28\n",
      ".\n",
      "631\n",
      "632\n",
      "[27, 28]\n",
      "['[CLS]', 'she', 'has', 'not', 'used', 'any', 'anti', 'retro', '##vira', '##l', 'therapy', 'since', 'then', ',', 'because', 'of', 'pan', '##cy', '##top', '##enia', 'and', 'vomiting', 'on', 'dd', '##i', '.', '[SEP]']\n",
      "1\n",
      "She\n",
      "633\n",
      "636\n",
      "[1]\n",
      "2\n",
      "has\n",
      "637\n",
      "640\n",
      "[1, 2]\n",
      "3\n",
      "not\n",
      "641\n",
      "644\n",
      "[2, 3]\n",
      "4\n",
      "used\n",
      "645\n",
      "649\n",
      "[3, 4]\n",
      "5\n",
      "any\n",
      "650\n",
      "653\n",
      "[4, 5]\n",
      "True\n",
      "6\n",
      "anti\n",
      "654\n",
      "658\n",
      "[5, 6]\n",
      "True\n",
      "9\n",
      "retroviral\n",
      "659\n",
      "669\n",
      "[6, 7, 8, 9]\n",
      "True\n",
      "10\n",
      "therapy\n",
      "670\n",
      "677\n",
      "[9, 10]\n",
      "True\n",
      "11\n",
      "since\n",
      "678\n",
      "683\n",
      "[10, 11]\n",
      "12\n",
      "then\n",
      "684\n",
      "688\n",
      "[11, 12]\n",
      "13\n",
      ",\n",
      "689\n",
      "690\n",
      "[12, 13]\n",
      "14\n",
      "because\n",
      "691\n",
      "698\n",
      "[13, 14]\n",
      "15\n",
      "of\n",
      "699\n",
      "701\n",
      "[14, 15]\n",
      "19\n",
      "pancytopenia\n",
      "702\n",
      "714\n",
      "[15, 16, 17, 18, 19]\n",
      "True\n",
      "20\n",
      "and\n",
      "715\n",
      "718\n",
      "[19, 20]\n",
      "21\n",
      "vomiting\n",
      "719\n",
      "727\n",
      "[20, 21]\n",
      "True\n",
      "22\n",
      "on\n",
      "728\n",
      "730\n",
      "[21, 22]\n",
      "24\n",
      "DDI\n",
      "731\n",
      "734\n",
      "[22, 23, 24]\n",
      "True\n",
      "25\n",
      ".\n",
      "735\n",
      "736\n",
      "[24, 25]\n",
      "['[CLS]', 'she', 'has', 'complaints', 'of', 'nausea', 'and', 'vomiting', 'as', 'well', 'as', 'left', 'upper', 'quadrant', 'pain', 'on', 'and', 'off', 'getting', 'progressively', 'worse', 'over', 'the', 'past', 'month', '.', '[SEP]']\n",
      "1\n",
      "She\n",
      "737\n",
      "740\n",
      "[1]\n",
      "2\n",
      "has\n",
      "741\n",
      "744\n",
      "[1, 2]\n",
      "3\n",
      "complaints\n",
      "745\n",
      "755\n",
      "[2, 3]\n",
      "4\n",
      "of\n",
      "756\n",
      "758\n",
      "[3, 4]\n",
      "5\n",
      "nausea\n",
      "759\n",
      "765\n",
      "[4, 5]\n",
      "True\n",
      "6\n",
      "and\n",
      "766\n",
      "769\n",
      "[5, 6]\n",
      "7\n",
      "vomiting\n",
      "770\n",
      "778\n",
      "[6, 7]\n",
      "True\n",
      "8\n",
      "as\n",
      "779\n",
      "781\n",
      "[7, 8]\n",
      "9\n",
      "well\n",
      "782\n",
      "786\n",
      "[8, 9]\n",
      "10\n",
      "as\n",
      "787\n",
      "789\n",
      "[9, 10]\n",
      "11\n",
      "left\n",
      "790\n",
      "794\n",
      "[10, 11]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "True\n",
      "12\n",
      "upper\n",
      "795\n",
      "800\n",
      "[11, 12]\n",
      "True\n",
      "13\n",
      "quadrant\n",
      "801\n",
      "809\n",
      "[12, 13]\n",
      "True\n",
      "14\n",
      "pain\n",
      "810\n",
      "814\n",
      "[13, 14]\n",
      "True\n",
      "15\n",
      "on\n",
      "815\n",
      "817\n",
      "[14, 15]\n",
      "16\n",
      "and\n",
      "818\n",
      "821\n",
      "[15, 16]\n",
      "17\n",
      "off\n",
      "822\n",
      "825\n",
      "[16, 17]\n",
      "18\n",
      "getting\n",
      "826\n",
      "833\n",
      "[17, 18]\n",
      "19\n",
      "progressively\n",
      "834\n",
      "847\n",
      "[18, 19]\n",
      "20\n",
      "worse\n",
      "848\n",
      "853\n",
      "[19, 20]\n",
      "True\n",
      "21\n",
      "over\n",
      "854\n",
      "858\n",
      "[20, 21]\n",
      "22\n",
      "the\n",
      "859\n",
      "862\n",
      "[21, 22]\n",
      "True\n",
      "23\n",
      "past\n",
      "863\n",
      "867\n",
      "[22, 23]\n",
      "True\n",
      "24\n",
      "month\n",
      "868\n",
      "873\n",
      "[23, 24]\n",
      "True\n",
      "25\n",
      ".\n",
      "874\n",
      "875\n",
      "[24, 25]\n",
      "['[CLS]', 'she', 'has', 'had', 'similar', 'pain', 'intermittent', '##ly', 'for', 'last', 'year', '.', '[SEP]']\n",
      "1\n",
      "She\n",
      "876\n",
      "879\n",
      "[1]\n",
      "2\n",
      "has\n",
      "880\n",
      "883\n",
      "[1, 2]\n",
      "3\n",
      "had\n",
      "884\n",
      "887\n",
      "[2, 3]\n",
      "4\n",
      "similar\n",
      "888\n",
      "895\n",
      "[3, 4]\n",
      "True\n",
      "5\n",
      "pain\n",
      "896\n",
      "900\n",
      "[4, 5]\n",
      "True\n",
      "7\n",
      "intermittently\n",
      "901\n",
      "915\n",
      "[5, 6, 7]\n",
      "8\n",
      "for\n",
      "916\n",
      "919\n",
      "[7, 8]\n",
      "9\n",
      "last\n",
      "920\n",
      "924\n",
      "[8, 9]\n",
      "True\n",
      "10\n",
      "year\n",
      "925\n",
      "929\n",
      "[9, 10]\n",
      "True\n",
      "11\n",
      ".\n",
      "930\n",
      "931\n",
      "[10, 11]\n",
      "['[CLS]', 'she', 'described', 'the', 'pain', 'as', 'a', 'burning', 'pain', 'which', 'is', 'position', '##al', ',', 'worse', 'when', 'she', 'walks', 'or', 'does', 'any', 'type', 'of', 'exercise', '.', '[SEP]']\n",
      "1\n",
      "She\n",
      "932\n",
      "935\n",
      "[1]\n",
      "2\n",
      "described\n",
      "936\n",
      "945\n",
      "[1, 2]\n",
      "3\n",
      "the\n",
      "946\n",
      "949\n",
      "[2, 3]\n",
      "True\n",
      "4\n",
      "pain\n",
      "950\n",
      "954\n",
      "[3, 4]\n",
      "True\n",
      "5\n",
      "as\n",
      "955\n",
      "957\n",
      "[4, 5]\n",
      "6\n",
      "a\n",
      "958\n",
      "959\n",
      "[5, 6]\n",
      "True\n",
      "7\n",
      "burning\n",
      "960\n",
      "967\n",
      "[6, 7]\n",
      "True\n",
      "8\n",
      "pain\n",
      "968\n",
      "972\n",
      "[7, 8]\n",
      "True\n",
      "9\n",
      "which\n",
      "973\n",
      "978\n",
      "[8, 9]\n",
      "10\n",
      "is\n",
      "979\n",
      "981\n",
      "[9, 10]\n",
      "12\n",
      "positional\n",
      "982\n",
      "992\n",
      "[10, 11, 12]\n",
      "13\n",
      ",\n",
      "993\n",
      "994\n",
      "[12, 13]\n",
      "14\n",
      "worse\n",
      "995\n",
      "1000\n",
      "[13, 14]\n",
      "True\n",
      "15\n",
      "when\n",
      "1001\n",
      "1005\n",
      "[14, 15]\n",
      "16\n",
      "she\n",
      "1006\n",
      "1009\n",
      "[15, 16]\n",
      "17\n",
      "walks\n",
      "1010\n",
      "1015\n",
      "[16, 17]\n",
      "True\n",
      "18\n",
      "or\n",
      "1016\n",
      "1018\n",
      "[17, 18]\n",
      "19\n",
      "does\n",
      "1019\n",
      "1023\n",
      "[18, 19]\n",
      "20\n",
      "any\n",
      "1024\n",
      "1027\n",
      "[19, 20]\n",
      "21\n",
      "type\n",
      "1028\n",
      "1032\n",
      "[20, 21]\n",
      "22\n",
      "of\n",
      "1033\n",
      "1035\n",
      "[21, 22]\n",
      "23\n",
      "exercise\n",
      "1036\n",
      "1044\n",
      "[22, 23]\n",
      "True\n",
      "24\n",
      ".\n",
      "1045\n",
      "1046\n",
      "[23, 24]\n",
      "['[CLS]', 'she', 'has', 'no', 'relief', 'from', 'ant', '##ac', '##ids', 'or', 'h', '##2', 'block', '##ers', '.', '[SEP]']\n",
      "1\n",
      "She\n",
      "1047\n",
      "1050\n",
      "[1]\n",
      "2\n",
      "has\n",
      "1051\n",
      "1054\n",
      "[1, 2]\n",
      "3\n",
      "no\n",
      "1055\n",
      "1057\n",
      "[2, 3]\n",
      "4\n",
      "relief\n",
      "1058\n",
      "1064\n",
      "[3, 4]\n",
      "True\n",
      "5\n",
      "from\n",
      "1065\n",
      "1069\n",
      "[4, 5]\n",
      "8\n",
      "antacids\n",
      "1070\n",
      "1078\n",
      "[5, 6, 7, 8]\n",
      "True\n",
      "9\n",
      "or\n",
      "1079\n",
      "1081\n",
      "[8, 9]\n",
      "11\n",
      "H2\n",
      "1082\n",
      "1084\n",
      "[9, 10, 11]\n",
      "True\n",
      "13\n",
      "blockers\n",
      "1085\n",
      "1093\n",
      "[11, 12, 13]\n",
      "True\n",
      "14\n",
      ".\n",
      "1094\n",
      "1095\n",
      "[13, 14]\n",
      "['[CLS]', 'in', '10', '/', '92', ',', 'she', 'had', 'a', 'ct', 'scan', 'which', 'showed', 'fatty', 'in', '##filtration', 'of', 'her', 'liver', 'diffuse', '##ly', 'with', 'a', '1', 'cm', 'cy', '##st', 'in', 'the', 'right', 'lobe', 'of', 'the', 'liver', '.', '[SEP]']\n",
      "1\n",
      "In\n",
      "1096\n",
      "1098\n",
      "[1]\n",
      "4\n",
      "10/92\n",
      "1099\n",
      "1104\n",
      "[1, 2, 3, 4]\n",
      "True\n",
      "5\n",
      ",\n",
      "1105\n",
      "1106\n",
      "[4, 5]\n",
      "6\n",
      "she\n",
      "1107\n",
      "1110\n",
      "[5, 6]\n",
      "7\n",
      "had\n",
      "1111\n",
      "1114\n",
      "[6, 7]\n",
      "8\n",
      "a\n",
      "1115\n",
      "1116\n",
      "[7, 8]\n",
      "True\n",
      "9\n",
      "CT\n",
      "1117\n",
      "1119\n",
      "[8, 9]\n",
      "True\n",
      "10\n",
      "scan\n",
      "1120\n",
      "1124\n",
      "[9, 10]\n",
      "True\n",
      "11\n",
      "which\n",
      "1125\n",
      "1130\n",
      "[10, 11]\n",
      "12\n",
      "showed\n",
      "1131\n",
      "1137\n",
      "[11, 12]\n",
      "13\n",
      "fatty\n",
      "1138\n",
      "1143\n",
      "[12, 13]\n",
      "True\n",
      "15\n",
      "infiltration\n",
      "1144\n",
      "1156\n",
      "[13, 14, 15]\n",
      "True\n",
      "16\n",
      "of\n",
      "1157\n",
      "1159\n",
      "[15, 16]\n",
      "True\n",
      "17\n",
      "her\n",
      "1160\n",
      "1163\n",
      "[16, 17]\n",
      "True\n",
      "18\n",
      "liver\n",
      "1164\n",
      "1169\n",
      "[17, 18]\n",
      "True\n",
      "20\n",
      "diffusely\n",
      "1170\n",
      "1179\n",
      "[18, 19, 20]\n",
      "True\n",
      "21\n",
      "with\n",
      "1180\n",
      "1184\n",
      "[20, 21]\n",
      "22\n",
      "a\n",
      "1185\n",
      "1186\n",
      "[21, 22]\n",
      "True\n",
      "23\n",
      "1\n",
      "1187\n",
      "1188\n",
      "[22, 23]\n",
      "True\n",
      "24\n",
      "cm\n",
      "1189\n",
      "1191\n",
      "[23, 24]\n",
      "True\n",
      "26\n",
      "cyst\n",
      "1192\n",
      "1196\n",
      "[24, 25, 26]\n",
      "True\n",
      "27\n",
      "in\n",
      "1197\n",
      "1199\n",
      "[26, 27]\n",
      "True\n",
      "28\n",
      "the\n",
      "1200\n",
      "1203\n",
      "[27, 28]\n",
      "True\n",
      "29\n",
      "right\n",
      "1204\n",
      "1209\n",
      "[28, 29]\n",
      "True\n",
      "30\n",
      "lobe\n",
      "1210\n",
      "1214\n",
      "[29, 30]\n",
      "True\n",
      "31\n",
      "of\n",
      "1215\n",
      "1217\n",
      "[30, 31]\n",
      "True\n",
      "32\n",
      "the\n",
      "1218\n",
      "1221\n",
      "[31, 32]\n",
      "True\n",
      "33\n",
      "liver\n",
      "1222\n",
      "1227\n",
      "[32, 33]\n",
      "True\n",
      "34\n",
      ".\n",
      "1228\n",
      "1229\n",
      "[33, 34]\n",
      "['[CLS]', 'she', 'had', 'a', 'normal', 'pan', '##cre', '##as', 'at', 'that', 'time', ',', 'however', ',', 'hyper', '##den', '##se', 'kidney', '##s', '.', '[SEP]']\n",
      "1\n",
      "She\n",
      "1230\n",
      "1233\n",
      "[1]\n",
      "2\n",
      "had\n",
      "1234\n",
      "1237\n",
      "[1, 2]\n",
      "3\n",
      "a\n",
      "1238\n",
      "1239\n",
      "[2, 3]\n",
      "True\n",
      "4\n",
      "normal\n",
      "1240\n",
      "1246\n",
      "[3, 4]\n",
      "True\n",
      "7\n",
      "pancreas\n",
      "1247\n",
      "1255\n",
      "[4, 5, 6, 7]\n",
      "True\n",
      "8\n",
      "at\n",
      "1256\n",
      "1258\n",
      "[7, 8]\n",
      "9\n",
      "that\n",
      "1259\n",
      "1263\n",
      "[8, 9]\n",
      "True\n",
      "10\n",
      "time\n",
      "1264\n",
      "1268\n",
      "[9, 10]\n",
      "True\n",
      "11\n",
      ",\n",
      "1269\n",
      "1270\n",
      "[10, 11]\n",
      "12\n",
      "however\n",
      "1271\n",
      "1278\n",
      "[11, 12]\n",
      "13\n",
      ",\n",
      "1279\n",
      "1280\n",
      "[12, 13]\n",
      "16\n",
      "hyperdense\n",
      "1281\n",
      "1291\n",
      "[13, 14, 15, 16]\n",
      "True\n",
      "18\n",
      "kidneys\n",
      "1292\n",
      "1299\n",
      "[16, 17, 18]\n",
      "True\n",
      "19\n",
      ".\n",
      "1300\n",
      "1301\n",
      "[18, 19]\n",
      "['[CLS]', 'her', 'al', '##kali', '##ne', 'ph', '##os', '##pha', '##tase', 'was', 'slightly', 'elevated', 'but', 'otherwise', 'relatively', 'normal', '.', '[SEP]']\n",
      "1\n",
      "Her\n",
      "1302\n",
      "1305\n",
      "[1]\n",
      "True\n",
      "4\n",
      "alkaline\n",
      "1306\n",
      "1314\n",
      "[1, 2, 3, 4]\n",
      "True\n",
      "8\n",
      "phosphatase\n",
      "1315\n",
      "1326\n",
      "[4, 5, 6, 7, 8]\n",
      "True\n",
      "9\n",
      "was\n",
      "1327\n",
      "1330\n",
      "[8, 9]\n",
      "10\n",
      "slightly\n",
      "1331\n",
      "1339\n",
      "[9, 10]\n",
      "True\n",
      "11\n",
      "elevated\n",
      "1340\n",
      "1348\n",
      "[10, 11]\n",
      "True\n",
      "12\n",
      "but\n",
      "1349\n",
      "1352\n",
      "[11, 12]\n",
      "13\n",
      "otherwise\n",
      "1353\n",
      "1362\n",
      "[12, 13]\n",
      "14\n",
      "relatively\n",
      "1363\n",
      "1373\n",
      "[13, 14]\n",
      "15\n",
      "normal\n",
      "1374\n",
      "1380\n",
      "[14, 15]\n",
      "True\n",
      "16\n",
      ".\n",
      "1381\n",
      "1382\n",
      "[15, 16]\n",
      "['[CLS]', 'her', 'amy', '##lase', 'was', 'mildly', 'elevated', 'but', 'has', 'been', 'down', 'since', 'then', '.', '[SEP]']\n",
      "1\n",
      "Her\n",
      "1383\n",
      "1386\n",
      "[1]\n",
      "True\n",
      "3\n",
      "amylase\n",
      "1387\n",
      "1394\n",
      "[1, 2, 3]\n",
      "True\n",
      "4\n",
      "was\n",
      "1395\n",
      "1398\n",
      "[3, 4]\n",
      "5\n",
      "mildly\n",
      "1399\n",
      "1405\n",
      "[4, 5]\n",
      "True\n",
      "6\n",
      "elevated\n",
      "1406\n",
      "1414\n",
      "[5, 6]\n",
      "True\n",
      "7\n",
      "but\n",
      "1415\n",
      "1418\n",
      "[6, 7]\n",
      "8\n",
      "has\n",
      "1419\n",
      "1422\n",
      "[7, 8]\n",
      "9\n",
      "been\n",
      "1423\n",
      "1427\n",
      "[8, 9]\n",
      "10\n",
      "down\n",
      "1428\n",
      "1432\n",
      "[9, 10]\n",
      "True\n",
      "11\n",
      "since\n",
      "1433\n",
      "1438\n",
      "[10, 11]\n",
      "12\n",
      "then\n",
      "1439\n",
      "1443\n",
      "[11, 12]\n",
      "13\n",
      ".\n",
      "1444\n",
      "1445\n",
      "[12, 13]\n",
      "['[CLS]', 'the', 'patient', 'has', 'had', 'progressive', 'failure', 'to', 'thrive', 'and', 'steady', 'weight', 'loss', '.', '[SEP]']\n",
      "1\n",
      "The\n",
      "1446\n",
      "1449\n",
      "[1]\n",
      "2\n",
      "patient\n",
      "1450\n",
      "1457\n",
      "[1, 2]\n",
      "3\n",
      "has\n",
      "1458\n",
      "1461\n",
      "[2, 3]\n",
      "4\n",
      "had\n",
      "1462\n",
      "1465\n",
      "[3, 4]\n",
      "5\n",
      "progressive\n",
      "1466\n",
      "1477\n",
      "[4, 5]\n",
      "True\n",
      "6\n",
      "failure\n",
      "1478\n",
      "1485\n",
      "[5, 6]\n",
      "True\n",
      "7\n",
      "to\n",
      "1486\n",
      "1488\n",
      "[6, 7]\n",
      "True\n",
      "8\n",
      "thrive\n",
      "1489\n",
      "1495\n",
      "[7, 8]\n",
      "True\n",
      "9\n",
      "and\n",
      "1496\n",
      "1499\n",
      "[8, 9]\n",
      "10\n",
      "steady\n",
      "1500\n",
      "1506\n",
      "[9, 10]\n",
      "True\n",
      "11\n",
      "weight\n",
      "1507\n",
      "1513\n",
      "[10, 11]\n",
      "True\n",
      "12\n",
      "loss\n",
      "1514\n",
      "1518\n",
      "[11, 12]\n",
      "True\n",
      "13\n",
      ".\n",
      "1519\n",
      "1520\n",
      "[12, 13]\n",
      "['[CLS]', 'she', 'was', 'brought', 'in', 'for', 'an', 'es', '##op', '##ha', '##go', '##gas', '##tro', '##du', '##ode', '##nos', '##co', '##py', 'on', '9', '/', '26', 'but', 'she', 'basically', 'was', 'not', 'sufficiently', 'se', '##date', '##d', 'and', 'read', '##mit', '##ted', 'at', 'this', 'time', 'for', 'a', 'gi', 'work', '-', 'up', 'as', 'well', 'as', 'an', 'evaluation', 'of', 'new', 'abs', '##ces', '##s', 'in', 'her', 'left', 'lower', 'calf', 'and', 'right', 'medial', 'lower', 'ex', '##tre', '##mity', 'quad', '##rice', '##ps', 'muscle', '.', '[SEP]']\n",
      "1\n",
      "She\n",
      "1521\n",
      "1524\n",
      "[1]\n",
      "2\n",
      "was\n",
      "1525\n",
      "1528\n",
      "[1, 2]\n",
      "3\n",
      "brought\n",
      "1529\n",
      "1536\n",
      "[2, 3]\n",
      "4\n",
      "in\n",
      "1537\n",
      "1539\n",
      "[3, 4]\n",
      "5\n",
      "for\n",
      "1540\n",
      "1543\n",
      "[4, 5]\n",
      "6\n",
      "an\n",
      "1544\n",
      "1546\n",
      "[5, 6]\n",
      "True\n",
      "17\n",
      "esophagogastroduodenoscopy\n",
      "1547\n",
      "1573\n",
      "[6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]\n",
      "True\n",
      "18\n",
      "on\n",
      "1574\n",
      "1576\n",
      "[17, 18]\n",
      "21\n",
      "9/26\n",
      "1577\n",
      "1581\n",
      "[18, 19, 20, 21]\n",
      "True\n",
      "22\n",
      "but\n",
      "1582\n",
      "1585\n",
      "[21, 22]\n",
      "23\n",
      "she\n",
      "1586\n",
      "1589\n",
      "[22, 23]\n",
      "24\n",
      "basically\n",
      "1590\n",
      "1599\n",
      "[23, 24]\n",
      "25\n",
      "was\n",
      "1600\n",
      "1603\n",
      "[24, 25]\n",
      "26\n",
      "not\n",
      "1604\n",
      "1607\n",
      "[25, 26]\n",
      "27\n",
      "sufficiently\n",
      "1608\n",
      "1620\n",
      "[26, 27]\n",
      "30\n",
      "sedated\n",
      "1621\n",
      "1628\n",
      "[27, 28, 29, 30]\n",
      "True\n",
      "31\n",
      "and\n",
      "1629\n",
      "1632\n",
      "[30, 31]\n",
      "34\n",
      "readmitted\n",
      "1633\n",
      "1643\n",
      "[31, 32, 33, 34]\n",
      "True\n",
      "35\n",
      "at\n",
      "1644\n",
      "1646\n",
      "[34, 35]\n",
      "36\n",
      "this\n",
      "1647\n",
      "1651\n",
      "[35, 36]\n",
      "True\n",
      "37\n",
      "time\n",
      "1652\n",
      "1656\n",
      "[36, 37]\n",
      "True\n",
      "38\n",
      "for\n",
      "1657\n",
      "1660\n",
      "[37, 38]\n",
      "39\n",
      "a\n",
      "1661\n",
      "1662\n",
      "[38, 39]\n",
      "True\n",
      "40\n",
      "GI\n",
      "1663\n",
      "1665\n",
      "[39, 40]\n",
      "True\n",
      "43\n",
      "work-up\n",
      "1666\n",
      "1673\n",
      "[40, 41, 42, 43]\n",
      "True\n",
      "44\n",
      "as\n",
      "1674\n",
      "1676\n",
      "[43, 44]\n",
      "45\n",
      "well\n",
      "1677\n",
      "1681\n",
      "[44, 45]\n",
      "46\n",
      "as\n",
      "1682\n",
      "1684\n",
      "[45, 46]\n",
      "47\n",
      "an\n",
      "1685\n",
      "1687\n",
      "[46, 47]\n",
      "True\n",
      "48\n",
      "evaluation\n",
      "1688\n",
      "1698\n",
      "[47, 48]\n",
      "True\n",
      "49\n",
      "of\n",
      "1699\n",
      "1701\n",
      "[48, 49]\n",
      "50\n",
      "new\n",
      "1702\n",
      "1705\n",
      "[49, 50]\n",
      "True\n",
      "53\n",
      "abscess\n",
      "1706\n",
      "1713\n",
      "[50, 51, 52, 53]\n",
      "True\n",
      "54\n",
      "in\n",
      "1714\n",
      "1716\n",
      "[53, 54]\n",
      "True\n",
      "55\n",
      "her\n",
      "1717\n",
      "1720\n",
      "[54, 55]\n",
      "True\n",
      "56\n",
      "left\n",
      "1721\n",
      "1725\n",
      "[55, 56]\n",
      "True\n",
      "57\n",
      "lower\n",
      "1726\n",
      "1731\n",
      "[56, 57]\n",
      "True\n",
      "58\n",
      "calf\n",
      "1732\n",
      "1736\n",
      "[57, 58]\n",
      "True\n",
      "59\n",
      "and\n",
      "1737\n",
      "1740\n",
      "[58, 59]\n",
      "60\n",
      "right\n",
      "1741\n",
      "1746\n",
      "[59, 60]\n",
      "61\n",
      "medial\n",
      "1747\n",
      "1753\n",
      "[60, 61]\n",
      "62\n",
      "lower\n",
      "1754\n",
      "1759\n",
      "[61, 62]\n",
      "65\n",
      "extremity\n",
      "1760\n",
      "1769\n",
      "[62, 63, 64, 65]\n",
      "68\n",
      "quadriceps\n",
      "1770\n",
      "1780\n",
      "[65, 66, 67, 68]\n",
      "69\n",
      "muscle\n",
      "1781\n",
      "1787\n",
      "[68, 69]\n",
      "70\n",
      ".\n",
      "1788\n",
      "1789\n",
      "[69, 70]\n",
      "['[CLS]', 'she', 'was', 'also', 'admitted', 'to', 'be', 'connected', 'up', 'with', 'social', 'services', 'for', 'hiv', 'patients', '.', '[SEP]']\n",
      "1\n",
      "She\n",
      "1790\n",
      "1793\n",
      "[1]\n",
      "2\n",
      "was\n",
      "1794\n",
      "1797\n",
      "[1, 2]\n",
      "3\n",
      "also\n",
      "1798\n",
      "1802\n",
      "[2, 3]\n",
      "4\n",
      "admitted\n",
      "1803\n",
      "1811\n",
      "[3, 4]\n",
      "True\n",
      "5\n",
      "to\n",
      "1812\n",
      "1814\n",
      "[4, 5]\n",
      "6\n",
      "be\n",
      "1815\n",
      "1817\n",
      "[5, 6]\n",
      "7\n",
      "connected\n",
      "1818\n",
      "1827\n",
      "[6, 7]\n",
      "True\n",
      "8\n",
      "up\n",
      "1828\n",
      "1830\n",
      "[7, 8]\n",
      "True\n",
      "9\n",
      "with\n",
      "1831\n",
      "1835\n",
      "[8, 9]\n",
      "True\n",
      "10\n",
      "social\n",
      "1836\n",
      "1842\n",
      "[9, 10]\n",
      "True\n",
      "11\n",
      "services\n",
      "1843\n",
      "1851\n",
      "[10, 11]\n",
      "True\n",
      "12\n",
      "for\n",
      "1852\n",
      "1855\n",
      "[11, 12]\n",
      "13\n",
      "HIV\n",
      "1856\n",
      "1859\n",
      "[12, 13]\n",
      "True\n",
      "14\n",
      "patients\n",
      "1860\n",
      "1868\n",
      "[13, 14]\n",
      "15\n",
      ".\n",
      "1869\n",
      "1870\n",
      "[14, 15]\n",
      "['[CLS]', 'hospital', 'course', ':', '[SEP]']\n",
      "1\n",
      "HOSPITAL\n",
      "1871\n",
      "1879\n",
      "[1]\n",
      "2\n",
      "COURSE\n",
      "1880\n",
      "1886\n",
      "[1, 2]\n",
      "3\n",
      ":\n",
      "1887\n",
      "1888\n",
      "[2, 3]\n",
      "['[CLS]', 'the', 'patient', 'was', 'admitted', 'and', 'many', 'cultures', 'were', 'sent', 'which', 'were', 'all', 'negative', '.', '[SEP]']\n",
      "1\n",
      "The\n",
      "1889\n",
      "1892\n",
      "[1]\n",
      "2\n",
      "patient\n",
      "1893\n",
      "1900\n",
      "[1, 2]\n",
      "3\n",
      "was\n",
      "1901\n",
      "1904\n",
      "[2, 3]\n",
      "4\n",
      "admitted\n",
      "1905\n",
      "1913\n",
      "[3, 4]\n",
      "True\n",
      "5\n",
      "and\n",
      "1914\n",
      "1917\n",
      "[4, 5]\n",
      "6\n",
      "many\n",
      "1918\n",
      "1922\n",
      "[5, 6]\n",
      "True\n",
      "7\n",
      "cultures\n",
      "1923\n",
      "1931\n",
      "[6, 7]\n",
      "True\n",
      "8\n",
      "were\n",
      "1932\n",
      "1936\n",
      "[7, 8]\n",
      "9\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "sent\n",
      "1937\n",
      "1941\n",
      "[8, 9]\n",
      "10\n",
      "which\n",
      "1942\n",
      "1947\n",
      "[9, 10]\n",
      "11\n",
      "were\n",
      "1948\n",
      "1952\n",
      "[10, 11]\n",
      "12\n",
      "all\n",
      "1953\n",
      "1956\n",
      "[11, 12]\n",
      "True\n",
      "13\n",
      "negative\n",
      "1957\n",
      "1965\n",
      "[12, 13]\n",
      "True\n",
      "14\n",
      ".\n",
      "1966\n",
      "1967\n",
      "[13, 14]\n",
      "['[CLS]', 'she', 'did', 'not', 'have', 'any', 'of', 'her', 'pain', 'in', 'the', 'hospital', '.', '[SEP]']\n",
      "1\n",
      "She\n",
      "1968\n",
      "1971\n",
      "[1]\n",
      "2\n",
      "did\n",
      "1972\n",
      "1975\n",
      "[1, 2]\n",
      "3\n",
      "not\n",
      "1976\n",
      "1979\n",
      "[2, 3]\n",
      "4\n",
      "have\n",
      "1980\n",
      "1984\n",
      "[3, 4]\n",
      "5\n",
      "any\n",
      "1985\n",
      "1988\n",
      "[4, 5]\n",
      "6\n",
      "of\n",
      "1989\n",
      "1991\n",
      "[5, 6]\n",
      "7\n",
      "her\n",
      "1992\n",
      "1995\n",
      "[6, 7]\n",
      "True\n",
      "8\n",
      "pain\n",
      "1996\n",
      "2000\n",
      "[7, 8]\n",
      "True\n",
      "9\n",
      "in\n",
      "2001\n",
      "2003\n",
      "[8, 9]\n",
      "10\n",
      "the\n",
      "2004\n",
      "2007\n",
      "[9, 10]\n",
      "True\n",
      "11\n",
      "hospital\n",
      "2008\n",
      "2016\n",
      "[10, 11]\n",
      "True\n",
      "12\n",
      ".\n",
      "2017\n",
      "2018\n",
      "[11, 12]\n",
      "['[CLS]', 'on', 'the', 'third', 'hospital', 'day', ',', 'she', 'did', 'have', 'some', 'pain', 'and', 'was', 'treated', 'with', 'per', '##co', '##ce', '##t', '.', '[SEP]']\n",
      "1\n",
      "On\n",
      "2019\n",
      "2021\n",
      "[1]\n",
      "2\n",
      "the\n",
      "2022\n",
      "2025\n",
      "[1, 2]\n",
      "True\n",
      "3\n",
      "third\n",
      "2026\n",
      "2031\n",
      "[2, 3]\n",
      "True\n",
      "4\n",
      "hospital\n",
      "2032\n",
      "2040\n",
      "[3, 4]\n",
      "True\n",
      "5\n",
      "day\n",
      "2041\n",
      "2044\n",
      "[4, 5]\n",
      "True\n",
      "6\n",
      ",\n",
      "2045\n",
      "2046\n",
      "[5, 6]\n",
      "7\n",
      "she\n",
      "2047\n",
      "2050\n",
      "[6, 7]\n",
      "8\n",
      "did\n",
      "2051\n",
      "2054\n",
      "[7, 8]\n",
      "9\n",
      "have\n",
      "2055\n",
      "2059\n",
      "[8, 9]\n",
      "10\n",
      "some\n",
      "2060\n",
      "2064\n",
      "[9, 10]\n",
      "True\n",
      "11\n",
      "pain\n",
      "2065\n",
      "2069\n",
      "[10, 11]\n",
      "True\n",
      "12\n",
      "and\n",
      "2070\n",
      "2073\n",
      "[11, 12]\n",
      "13\n",
      "was\n",
      "2074\n",
      "2077\n",
      "[12, 13]\n",
      "14\n",
      "treated\n",
      "2078\n",
      "2085\n",
      "[13, 14]\n",
      "15\n",
      "with\n",
      "2086\n",
      "2090\n",
      "[14, 15]\n",
      "19\n",
      "Percocet\n",
      "2091\n",
      "2099\n",
      "[15, 16, 17, 18, 19]\n",
      "True\n",
      "20\n",
      ".\n",
      "2100\n",
      "2101\n",
      "[19, 20]\n",
      "['[CLS]', 'she', 'went', 'for', 'a', 'de', '##bri', '##de', '##ment', 'of', 'her', 'left', 'calf', 'les', '##ion', 'on', '10', '/', '2', '/', '93', 'and', 'was', 'started', 'empirical', '##ly', 'on', 'iv', 'ce', '##ft', '##ria', '##xon', '##e', 'which', 'was', 'changed', 'to', 'po', 'do', '##xy', '##cy', '##cl', '##ine', 'on', 'the', 'day', 'of', 'discharge', '.', '[SEP]']\n",
      "1\n",
      "She\n",
      "2102\n",
      "2105\n",
      "[1]\n",
      "2\n",
      "went\n",
      "2106\n",
      "2110\n",
      "[1, 2]\n",
      "3\n",
      "for\n",
      "2111\n",
      "2114\n",
      "[2, 3]\n",
      "4\n",
      "a\n",
      "2115\n",
      "2116\n",
      "[3, 4]\n",
      "True\n",
      "8\n",
      "debridement\n",
      "2117\n",
      "2128\n",
      "[4, 5, 6, 7, 8]\n",
      "True\n",
      "9\n",
      "of\n",
      "2129\n",
      "2131\n",
      "[8, 9]\n",
      "10\n",
      "her\n",
      "2132\n",
      "2135\n",
      "[9, 10]\n",
      "True\n",
      "11\n",
      "left\n",
      "2136\n",
      "2140\n",
      "[10, 11]\n",
      "True\n",
      "12\n",
      "calf\n",
      "2141\n",
      "2145\n",
      "[11, 12]\n",
      "True\n",
      "14\n",
      "lesion\n",
      "2146\n",
      "2152\n",
      "[12, 13, 14]\n",
      "True\n",
      "15\n",
      "on\n",
      "2153\n",
      "2155\n",
      "[14, 15]\n",
      "20\n",
      "10/2/93\n",
      "2156\n",
      "2163\n",
      "[15, 16, 17, 18, 19, 20]\n",
      "True\n",
      "21\n",
      "and\n",
      "2164\n",
      "2167\n",
      "[20, 21]\n",
      "22\n",
      "was\n",
      "2168\n",
      "2171\n",
      "[21, 22]\n",
      "23\n",
      "started\n",
      "2172\n",
      "2179\n",
      "[22, 23]\n",
      "25\n",
      "empirically\n",
      "2180\n",
      "2191\n",
      "[23, 24, 25]\n",
      "26\n",
      "on\n",
      "2192\n",
      "2194\n",
      "[25, 26]\n",
      "27\n",
      "IV\n",
      "2195\n",
      "2197\n",
      "[26, 27]\n",
      "True\n",
      "32\n",
      "ceftriaxone\n",
      "2198\n",
      "2209\n",
      "[27, 28, 29, 30, 31, 32]\n",
      "True\n",
      "33\n",
      "which\n",
      "2210\n",
      "2215\n",
      "[32, 33]\n",
      "34\n",
      "was\n",
      "2216\n",
      "2219\n",
      "[33, 34]\n",
      "35\n",
      "changed\n",
      "2220\n",
      "2227\n",
      "[34, 35]\n",
      "36\n",
      "to\n",
      "2228\n",
      "2230\n",
      "[35, 36]\n",
      "37\n",
      "po\n",
      "2231\n",
      "2233\n",
      "[36, 37]\n",
      "True\n",
      "42\n",
      "doxycycline\n",
      "2234\n",
      "2245\n",
      "[37, 38, 39, 40, 41, 42]\n",
      "True\n",
      "43\n",
      "on\n",
      "2246\n",
      "2248\n",
      "[42, 43]\n",
      "44\n",
      "the\n",
      "2249\n",
      "2252\n",
      "[43, 44]\n",
      "True\n",
      "45\n",
      "day\n",
      "2253\n",
      "2256\n",
      "[44, 45]\n",
      "True\n",
      "46\n",
      "of\n",
      "2257\n",
      "2259\n",
      "[45, 46]\n",
      "True\n",
      "47\n",
      "discharge\n",
      "2260\n",
      "2269\n",
      "[46, 47]\n",
      "True\n",
      "True\n",
      "48\n",
      ".\n",
      "2270\n",
      "2271\n",
      "[47, 48]\n",
      "True\n",
      "['[CLS]', 'a', 'follow', '-', 'up', 'ct', 'scan', 'was', 'done', 'which', 'did', 'not', 'show', 'any', 'evidence', 'for', 'sp', '##len', '##ome', '##gal', '##y', 'or', 'he', '##pa', '##tom', '##ega', '##ly', '.', '[SEP]']\n",
      "1\n",
      "A\n",
      "2272\n",
      "2273\n",
      "[1]\n",
      "True\n",
      "4\n",
      "follow-up\n",
      "2274\n",
      "2283\n",
      "[1, 2, 3, 4]\n",
      "True\n",
      "5\n",
      "CT\n",
      "2284\n",
      "2286\n",
      "[4, 5]\n",
      "True\n",
      "6\n",
      "scan\n",
      "2287\n",
      "2291\n",
      "[5, 6]\n",
      "True\n",
      "7\n",
      "was\n",
      "2292\n",
      "2295\n",
      "[6, 7]\n",
      "8\n",
      "done\n",
      "2296\n",
      "2300\n",
      "[7, 8]\n",
      "9\n",
      "which\n",
      "2301\n",
      "2306\n",
      "[8, 9]\n",
      "10\n",
      "did\n",
      "2307\n",
      "2310\n",
      "[9, 10]\n",
      "11\n",
      "not\n",
      "2311\n",
      "2314\n",
      "[10, 11]\n",
      "12\n",
      "show\n",
      "2315\n",
      "2319\n",
      "[11, 12]\n",
      "True\n",
      "13\n",
      "any\n",
      "2320\n",
      "2323\n",
      "[12, 13]\n",
      "14\n",
      "evidence\n",
      "2324\n",
      "2332\n",
      "[13, 14]\n",
      "True\n",
      "15\n",
      "for\n",
      "2333\n",
      "2336\n",
      "[14, 15]\n",
      "20\n",
      "splenomegaly\n",
      "2337\n",
      "2349\n",
      "[15, 16, 17, 18, 19, 20]\n",
      "True\n",
      "21\n",
      "or\n",
      "2350\n",
      "2352\n",
      "[20, 21]\n",
      "26\n",
      "hepatomegaly\n",
      "2353\n",
      "2365\n",
      "[21, 22, 23, 24, 25, 26]\n",
      "True\n",
      "27\n",
      ".\n",
      "2366\n",
      "2367\n",
      "[26, 27]\n",
      "['[CLS]', 'the', '1', 'cm', 'cy', '##st', 'which', 'was', 'seen', 'in', '10', '/', '92', 'was', 'still', 'present', '.', '[SEP]']\n",
      "1\n",
      "The\n",
      "2368\n",
      "2371\n",
      "[1]\n",
      "True\n",
      "2\n",
      "1\n",
      "2372\n",
      "2373\n",
      "[1, 2]\n",
      "True\n",
      "3\n",
      "cm\n",
      "2374\n",
      "2376\n",
      "[2, 3]\n",
      "True\n",
      "5\n",
      "cyst\n",
      "2377\n",
      "2381\n",
      "[3, 4, 5]\n",
      "True\n",
      "6\n",
      "which\n",
      "2382\n",
      "2387\n",
      "[5, 6]\n",
      "7\n",
      "was\n",
      "2388\n",
      "2391\n",
      "[6, 7]\n",
      "8\n",
      "seen\n",
      "2392\n",
      "2396\n",
      "[7, 8]\n",
      "9\n",
      "in\n",
      "2397\n",
      "2399\n",
      "[8, 9]\n",
      "12\n",
      "10/92\n",
      "2400\n",
      "2405\n",
      "[9, 10, 11, 12]\n",
      "True\n",
      "13\n",
      "was\n",
      "2406\n",
      "2409\n",
      "[12, 13]\n",
      "14\n",
      "still\n",
      "2410\n",
      "2415\n",
      "[13, 14]\n",
      "15\n",
      "present\n",
      "2416\n",
      "2423\n",
      "[14, 15]\n",
      "16\n",
      ".\n",
      "2424\n",
      "2425\n",
      "[15, 16]\n",
      "['[CLS]', 'there', 'was', 'a', 'question', 'of', 'a', 'cy', '##st', 'in', 'her', 'kidney', 'with', 'a', 'stone', 'right', 'below', 'the', 'cy', '##st', ',', 'although', 'this', 'did', 'not', 'seem', 'to', 'be', 'clinical', '##ly', 'significant', '.', '[SEP]']\n",
      "1\n",
      "There\n",
      "2426\n",
      "2431\n",
      "[1]\n",
      "2\n",
      "was\n",
      "2432\n",
      "2435\n",
      "[1, 2]\n",
      "3\n",
      "a\n",
      "2436\n",
      "2437\n",
      "[2, 3]\n",
      "4\n",
      "question\n",
      "2438\n",
      "2446\n",
      "[3, 4]\n",
      "5\n",
      "of\n",
      "2447\n",
      "2449\n",
      "[4, 5]\n",
      "6\n",
      "a\n",
      "2450\n",
      "2451\n",
      "[5, 6]\n",
      "True\n",
      "8\n",
      "cyst\n",
      "2452\n",
      "2456\n",
      "[6, 7, 8]\n",
      "True\n",
      "9\n",
      "in\n",
      "2457\n",
      "2459\n",
      "[8, 9]\n",
      "True\n",
      "10\n",
      "her\n",
      "2460\n",
      "2463\n",
      "[9, 10]\n",
      "True\n",
      "11\n",
      "kidney\n",
      "2464\n",
      "2470\n",
      "[10, 11]\n",
      "True\n",
      "12\n",
      "with\n",
      "2471\n",
      "2475\n",
      "[11, 12]\n",
      "13\n",
      "a\n",
      "2476\n",
      "2477\n",
      "[12, 13]\n",
      "True\n",
      "14\n",
      "stone\n",
      "2478\n",
      "2483\n",
      "[13, 14]\n",
      "True\n",
      "15\n",
      "right\n",
      "2484\n",
      "2489\n",
      "[14, 15]\n",
      "16\n",
      "below\n",
      "2490\n",
      "2495\n",
      "[15, 16]\n",
      "17\n",
      "the\n",
      "2496\n",
      "2499\n",
      "[16, 17]\n",
      "True\n",
      "19\n",
      "cyst\n",
      "2500\n",
      "2504\n",
      "[17, 18, 19]\n",
      "True\n",
      "20\n",
      ",\n",
      "2505\n",
      "2506\n",
      "[19, 20]\n",
      "21\n",
      "although\n",
      "2507\n",
      "2515\n",
      "[20, 21]\n",
      "22\n",
      "this\n",
      "2516\n",
      "2520\n",
      "[21, 22]\n",
      "23\n",
      "did\n",
      "2521\n",
      "2524\n",
      "[22, 23]\n",
      "24\n",
      "not\n",
      "2525\n",
      "2528\n",
      "[23, 24]\n",
      "25\n",
      "seem\n",
      "2529\n",
      "2533\n",
      "[24, 25]\n",
      "26\n",
      "to\n",
      "2534\n",
      "2536\n",
      "[25, 26]\n",
      "27\n",
      "be\n",
      "2537\n",
      "2539\n",
      "[26, 27]\n",
      "29\n",
      "clinically\n",
      "2540\n",
      "2550\n",
      "[27, 28, 29]\n",
      "30\n",
      "significant\n",
      "2551\n",
      "2562\n",
      "[29, 30]\n",
      "31\n",
      ".\n",
      "2563\n",
      "2564\n",
      "[30, 31]\n"
     ]
    }
   ],
   "source": [
    "# Smoke test: parse one annotated discharge note end-to-end before\n",
    "# processing the whole training directory in a later cell.\n",
    "# NOTE(review): hardcoded absolute Windows path; prefer a shared DATA_DIR\n",
    "# constant defined once in a config cell.\n",
    "train_dir = \"C:/Users/itsma/Documents/Capstone project/DS5500-capstone/train_data/\"\n",
    "\n",
    "file = \"1.xml\"\n",
    "file_name = os.path.join(train_dir, file)\n",
    "tree = ET.parse(file_name)\n",
    "root = tree.getroot()\n",
    "discharge_note = DischargeNote(root, 1)\n",
    "results = discharge_note.process_note()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[101, 9634, 3058, 1024, 5641, 1013, 2756, 1013, 2857, 11889, 3058, 1024, 2184, 1013, 5840, 1013, 2857, 102]\n"
     ]
    }
   ],
   "source": [
    "# Token-id sequence for the second sentence of the parsed note\n",
    "# (results[0] holds one id list per sentence).\n",
    "print(results[0][1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['[CLS]', 'admission', 'date', ':', '09', '/', '29', '/', '1993', 'discharge', 'date', ':', '10', '/', '04', '/', '1993', '[SEP]']\n"
     ]
    }
   ],
   "source": [
    "# The same sentence as WordPiece token strings (results[1] mirrors\n",
    "# results[0] but holds tokens instead of ids).\n",
    "print(results[1][1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0, 1, 0, 2, 2, 2, 2, 2, 2, 1, 0, 2, 2, 2, 2, 2, 2, 0]\n"
     ]
    }
   ],
   "source": [
    "# Per-token labels aligned one-to-one with the token list above.\n",
    "print(results[2][1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 76,
   "metadata": {},
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'sentences' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-76-5149386e30ff>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m      3\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      4\u001b[0m \u001b[1;31m# For every sentence...\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 5\u001b[1;33m \u001b[1;32mfor\u001b[0m \u001b[0msent\u001b[0m \u001b[1;32min\u001b[0m \u001b[0msentences\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m      6\u001b[0m     \u001b[1;31m# `encode` will:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      7\u001b[0m     \u001b[1;31m#   (1) Tokenize the sentence.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mNameError\u001b[0m: name 'sentences' is not defined"
     ]
    }
   ],
   "source": [
    "# Tokenize all of the sentences and map the tokens to their word IDs.\n",
    "# NOTE(review): this cell raises NameError on a fresh run -- neither\n",
    "# `sentences` nor `tokenizer` is defined anywhere in this notebook (see\n",
    "# the recorded traceback). It looks like leftover tutorial code; define\n",
    "# both names, or delete this cell, so Restart & Run All succeeds.\n",
    "input_ids = []\n",
    "\n",
    "# For every sentence...\n",
    "for sent in sentences:\n",
    "    # `encode` will:\n",
    "    #   (1) Tokenize the sentence.\n",
    "    #   (2) Prepend the `[CLS]` token to the start.\n",
    "    #   (3) Append the `[SEP]` token to the end.\n",
    "    #   (4) Map tokens to their IDs.\n",
    "    encoded_sent = tokenizer.encode(\n",
    "                        sent,                      # Sentence to encode.\n",
    "                        add_special_tokens = True, # Add '[CLS]' and '[SEP]'\n",
    "\n",
    "                        # This function also supports truncation and conversion\n",
    "                        # to pytorch tensors, but we need to do padding, so we\n",
    "                        # can't use these features :( .\n",
    "                        #max_length = 128,          # Truncate all sentences.\n",
    "                        #return_tensors = 'pt',     # Return pytorch tensors.\n",
    "                   )\n",
    "\n",
    "    # Add the encoded sentence to the list.\n",
    "    input_ids.append(encoded_sent)\n",
    "\n",
    "# Print sentence 0, now as a list of IDs.\n",
    "print('Original: ', sentences[0])\n",
    "print('Token IDs:', input_ids[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Text progress bar wrapping the file-processing loop in the next cell.\n",
    "from progressbar import ProgressBar\n",
    "pbar = ProgressBar()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100% |########################################################################|\n"
     ]
    }
   ],
   "source": [
    "# Build the training corpus: collect token-id sequences (results[0]) and\n",
    "# their aligned per-token label sequences (results[2]) from every\n",
    "# annotated XML note in the training directory.\n",
    "# NOTE(review): same hardcoded path as the single-file cell above --\n",
    "# hoisted into a local here; ideally defined once in a config cell.\n",
    "train_dir = \"C:/Users/itsma/Documents/Capstone project/DS5500-capstone/train_data/\"\n",
    "\n",
    "input_ids = []\n",
    "labels = []\n",
    "\n",
    "for file in pbar(os.listdir(train_dir)):\n",
    "    if file.endswith(\".xml\"):\n",
    "        file_name = os.path.join(train_dir, file)\n",
    "        tree = ET.parse(file_name)\n",
    "        root = tree.getroot()\n",
    "        discharge_note = DischargeNote(root, 1)\n",
    "        results = discharge_note.process_note()\n",
    "\n",
    "        # results[1] (token strings) is not needed for training.\n",
    "        for index in range(len(results[0])):\n",
    "            input_ids.append(results[0][index])\n",
    "            labels.append(results[2][index])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 65,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pickle"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 66,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the tokenized inputs and labels. Use context managers so the\n",
    "# file handles are closed and flushed -- the original bare open() inside\n",
    "# pickle.dump() leaked the handle.\n",
    "with open(\"C:/Users/itsma/Documents/input_ids.pkl\", \"wb\") as f:\n",
    "    pickle.dump(input_ids, f)\n",
    "with open(\"C:/Users/itsma/Documents/label.pkl\", \"wb\") as f:\n",
    "    pickle.dump(labels, f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load a local BERT checkpoint (an uncased PubMed+MIMIC model, per the\n",
    "# directory name). output_hidden_states=True makes the model return\n",
    "# hidden states from all layers, not only the last.\n",
    "model_dir = \"C:/Users/itsma/Documents/BERT_models/NCBI_BERT_pubmed_mimic_uncased_L-12_H-768_A-12\"\n",
    "\n",
    "config = BertConfig.from_pretrained(model_dir)\n",
    "config.output_hidden_states = True\n",
    "\n",
    "bert_tokenizer = BertTokenizer.from_pretrained(model_dir)\n",
    "bert_model = BertModel.from_pretrained(model_dir, config=config)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sanity check: encode a two-sentence string and its second sentence\n",
    "# separately; the resulting id tensors are compared in the cells below.\n",
    "bert_input1 = \"I love my Dog.He is Cute.\"\n",
    "bert_input2 = \"He is Cute.\"\n",
    "\n",
    "encodings1 = bert_tokenizer.encode(bert_input1, add_special_tokens=True)\n",
    "input_ids1 = torch.tensor(encodings1).unsqueeze(0)  # add batch dimension\n",
    "encodings2 = bert_tokenizer.encode(bert_input2, add_special_tokens=True)\n",
    "input_ids2 = torch.tensor(encodings2).unsqueeze(0)  # add batch dimension"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[  101,  1045,  2293,  2026,  3899,  1012,  2002,  2003, 10140,  1012,\n",
       "           102]])"
      ]
     },
     "execution_count": 44,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Batched id tensor for bert_input1 (shape 1 x sequence length).\n",
    "input_ids1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[101, 1045, 2293, 2026, 3899, 1012, 2002, 2003, 10140, 1012, 102]"
      ]
     },
     "execution_count": 48,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Raw (unbatched) id list for bert_input1.\n",
    "encodings1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[  101,  2002,  2003, 10140,  1012,   102]])"
      ]
     },
     "execution_count": 45,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Batched id tensor for bert_input2.\n",
    "input_ids2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0, 0, 0, 0, 0, 0, 0, 0]"
      ]
     },
     "execution_count": 50,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Scratch check of list-repetition syntax.\n",
    "# NOTE(review): the recorded output (eight zeros) is stale -- it comes\n",
    "# from an earlier run of a different expression than `[0] * 4`.\n",
    "[0] * 4"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 131,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scratch list for the slicing experiment below.\n",
    "sample_list = [1, 2, 3, 4, 5, 6]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 135,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[1, 2, 3, 4, 5]"
      ]
     },
     "execution_count": 135,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# All elements except the last.\n",
    "sample_list[:-1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
