{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "00539cf9",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "\n",
    "import json\n",
    "\n",
    "import numpy as np\n",
    "import torch\n",
    "import torch.nn.functional as F\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "\n",
    "from transformers import BertTokenizer, BertForMaskedLM, BertModel"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "6241f047",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Path configuration for the KL-divergence question-pair dump pipeline.\n",
    "# NOTE(review): WORK_DIR is a hardcoded, machine-specific absolute path;\n",
    "# consider making it configurable (env var / CLI argument) for portability.\n",
    "WORK_DIR = \"/data/disk5/private/yuc/coref/bert-tagger\"\n",
    "# Text file listing one corpus file path per line (consumed by SentenceIterable).\n",
    "FILE_LIST = \"filelist.txt\"\n",
    "WIKI_DIR = os.path.join(WORK_DIR, \"../wikipedia/text\")\n",
    "# DUMP_DIR =  os.path.join(WORK_DIR, \"playground/dump\")\n",
    "DUMP_DIR = os.path.join(WORK_DIR, \"playground/dump_kl_para\")\n",
    "LOG_DIR = os.path.join(WORK_DIR, \"playground/logs\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "55fb3d21",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "246\n"
     ]
    }
   ],
   "source": [
    "# Count the parsed-text shard files (names beginning with \"text\").\n",
    "TEXT_DIR = os.path.join(WORK_DIR, \"../wikipedia/parsed-text\")\n",
    "files = [name for name in os.listdir(TEXT_DIR) if name.startswith(\"text\")]\n",
    "print(len(files))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "79228c11",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[110107, 254563, 207264, 169475, 131670, 99504, 73419, 53441, 38283, 27709, 19496, 14024, 9863, 7265, 5196, 3587, 2388, 1466, 725, 331, 133, 55, 18, 5, 3, 2, 0, 2, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]\n"
     ]
    }
   ],
   "source": [
    "# Histogram of noun-phrase-list lengths over the parsed shards, bucketed\n",
    "# into bins of width 4; sentences with more than 128 NPs are skipped.\n",
    "np_list_lengths = [0, ] * 40\n",
    "bin_width = 4\n",
    "discard_count = 0\n",
    "for filename in files:\n",
    "    with open(os.path.join(TEXT_DIR, filename), \"r\") as f:\n",
    "        for line in f:\n",
    "            sentence = json.loads(line.strip())\n",
    "            if len(sentence[\"np_list\"]) > 128:\n",
    "                discard_count += 1\n",
    "                continue\n",
    "            np_list_lengths[ len(sentence[\"np_list\"]) // bin_width ] += 1\n",
    "\n",
    "print(np_list_lengths)\n",
    "# Fix: discard_count was tallied but never reported.\n",
    "print(\"discarded:\", discard_count)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ba0569f3",
   "metadata": {},
   "outputs": [],
   "source": [
    "# One-time global setup: device, tokenizer, and masked-LM model shared by\n",
    "# default_transform / QuestionPairIterable / QuestionPairConsumer below.\n",
    "# NOTE(review): requires a CUDA-capable machine; no CPU fallback is provided.\n",
    "print(\"Global initialization started.\")\n",
    "global_device = torch.device('cuda')\n",
    "global_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n",
    "global_model = BertForMaskedLM.from_pretrained('bert-base-uncased').to(global_device)\n",
    "print(\"Global initialization completed.\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "71a28225",
   "metadata": {},
   "outputs": [],
   "source": [
    "def default_transform(sentence, device=global_device):\n",
    "    \"\"\"Tokenize one raw sentence for the question-pair pipeline.\n",
    "\n",
    "    Returns a dict with \"tokens\" (1-D id tensor on `device`, including the\n",
    "    [CLS]/[SEP] boundary ids) and \"raw\" (subword strings), or None when the\n",
    "    line is markup or its tokenized length falls outside (5, 30).\n",
    "    \"\"\"\n",
    "    # Fix: indexing sentence[0] raised IndexError on empty lines;\n",
    "    # startswith() is safe for any string.\n",
    "    if sentence.startswith(\"<\"):  # skip XML/markup lines such as <doc ...>\n",
    "        return None\n",
    "    # raw_tokens: parsed into subwords but not yet converted to ids\n",
    "    raw_tokens = global_tokenizer.tokenize(sentence)\n",
    "    # tokens converted to ids, shape [1, L] -> squeezed to [L]\n",
    "    tokens = global_tokenizer(sentence, return_tensors=\"pt\")[\"input_ids\"].to(device)\n",
    "    tokens = torch.squeeze(tokens)\n",
    "    l = tokens.shape[0]\n",
    "    if l >= 30:  # ignore sentences that are too long\n",
    "        return None\n",
    "    if l <= 5:  # ignore invalid lines and short sentences\n",
    "        return None\n",
    "\n",
    "    # this is the format of a sentence.\n",
    "    return {\n",
    "        \"tokens\": tokens,\n",
    "        \"raw\": raw_tokens\n",
    "    }\n",
    "\n",
    "sentence = global_tokenizer.convert_tokens_to_string(['cochran', '–', 'arm', '##ita', '##ge', 'test', 'for', 'trend'])\n",
    "print(sentence)\n",
    "ts = default_transform(sentence)\n",
    "print(ts)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4d6147f9",
   "metadata": {},
   "outputs": [],
   "source": [
    "class SentenceIterable:\n",
    "    \"\"\"Resumable iterator over sentences listed in a file-of-file-paths.\n",
    "\n",
    "    Yields (transformed_sentence, file_id, stc_id) triples, skipping\n",
    "    sentences for which `transform` returns None. The starting position\n",
    "    (file_id, stc_id) allows resuming an interrupted pass.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self,\n",
    "        file_path_list=FILE_LIST,\n",
    "        file_id=0,\n",
    "        stc_id=0,\n",
    "        transform=default_transform):\n",
    "        self.file_id = file_id\n",
    "        self.stc_id = stc_id\n",
    "        with open(file_path_list, \"r\") as f_list:\n",
    "            self.file_paths = f_list.read().split()\n",
    "        # Fix: compare to None with `is`, not `==` (PEP 8; also avoids\n",
    "        # surprises if a transform object overloads __eq__).\n",
    "        if transform is None:\n",
    "            self.transform = default_transform\n",
    "        else:\n",
    "            self.transform = transform\n",
    "        print(\"SentenceIterable constructed.\")\n",
    "\n",
    "    def __iter__(self):\n",
    "        return self.sentence_generator()\n",
    "\n",
    "    def sentence_generator(self):\n",
    "        # Iterate files from self.file_id and sentences from self.stc_id so\n",
    "        # iteration resumes exactly where a previous run left off.\n",
    "        file_count = len(self.file_paths)\n",
    "        while self.file_id < file_count:\n",
    "            file_path = self.file_paths[self.file_id]\n",
    "            with open(file_path) as fs:\n",
    "                sentences = fs.readlines()\n",
    "                sentence_count = len(sentences)\n",
    "                while self.stc_id < sentence_count:\n",
    "                    sentence = sentences[self.stc_id]\n",
    "                    sentence = self.transform(sentence)\n",
    "                    if sentence is None:\n",
    "                        print(\"sentence discarded.\")\n",
    "                    else:\n",
    "                        yield (sentence, self.file_id, self.stc_id)\n",
    "                    self.stc_id += 1\n",
    "            self.stc_id = 0\n",
    "            self.file_id += 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0edd5fc4",
   "metadata": {},
   "outputs": [],
   "source": [
    "class QuestionPairIterable(Dataset):\n",
    "    def __init__(self, \n",
    "        sentence,\n",
    "        mask_placeholder=\"[MASK]\",\n",
    "        miss_placeholder=\"[MASK]\"):\n",
    "        super(QuestionPairIterable).__init__()\n",
    "        self.sentence = sentence[\"tokens\"]\n",
    "        self.miss_ph = miss_placeholder\n",
    "        self.mask_ph = mask_placeholder\n",
    "        self.miss_id = global_tokenizer.convert_tokens_to_ids(miss_placeholder)\n",
    "        self.mask_id = global_tokenizer.convert_tokens_to_ids(mask_placeholder)\n",
    "        length = len(self.sentence)\n",
    "        self.index_pairs = [\n",
    "            ([miss_index], [mask_index])\n",
    "            for miss_index in range(1, length-1)\n",
    "                for mask_index in range(1, length-1)\n",
    "                    if miss_index != mask_index\n",
    "        ]\n",
    "\n",
    "        self.start = 0\n",
    "        self.end = len(self.index_pairs)\n",
    "        print(\"QuestionPairIterable constructed.\")\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.index_pairs)\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        missing_indices, masked_indices = self.index_pairs[index]\n",
    "        # unmasked_question = list(self.sentence)\n",
    "        unmasked_question = self.sentence.clone()\n",
    "        for missing_index in missing_indices:\n",
    "            # unmasked_question[missing_index] = self.miss_ph\n",
    "            unmasked_question[missing_index] = self.miss_id\n",
    "        # masked_question = list(unmasked_question)\n",
    "        masked_question = unmasked_question.clone()\n",
    "        for masked_index in masked_indices:\n",
    "            # masked_question[masked_index] = self.mask_ph\n",
    "            masked_question[masked_index] = self.mask_id\n",
    "        return {\n",
    "            \"label\": self.sentence,\n",
    "            \"unmasked\": unmasked_question, \n",
    "            \"masked\": masked_question, \n",
    "            \"miss_id\": torch.tensor(missing_indices), \n",
    "            \"mask_id\": torch.tensor(masked_indices)\n",
    "        }\n",
    "    \n",
    "def test_pair_iterable():\n",
    "    \"\"\"Smoke test: build a toy 5-token sentence and print every pair sample.\"\"\"\n",
    "    toy_tokens = torch.from_numpy(np.array([1, 2, 3, 4, 5]))\n",
    "    dataset = QuestionPairIterable({\"tokens\": toy_tokens})\n",
    "    for sample in dataset:\n",
    "        print(sample)\n",
    "\n",
    "test_pair_iterable()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b522e2a5",
   "metadata": {},
   "outputs": [],
   "source": [
    "# End-to-end demo: rebuild the example sentence, transform it, and print\n",
    "# every (missing, masked) question pair generated from it.\n",
    "# NOTE(review): the name `sentence` is reused for two different things here\n",
    "# (raw string, then transform dict) -- consider distinct names.\n",
    "sentence = global_tokenizer.convert_tokens_to_string(['cochran', '–', 'arm', '##ita', '##ge', 'test', 'for', 'trend'])\n",
    "sentence = default_transform(sentence)\n",
    "producer = QuestionPairIterable(sentence)\n",
    "for sample in producer:\n",
    "    print(sample)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "34833ece",
   "metadata": {},
   "outputs": [],
   "source": [
    "class QuestionPairConsumer:\n",
    "    \"\"\"Scores (unmasked, masked) question pairs with the shared masked LM.\n",
    "\n",
    "    Runs the model on both question variants and reduces the logits at the\n",
    "    missing positions with `measure(masked_logits, unmasked_logits, golden)`.\n",
    "    \"\"\"\n",
    "\n",
    "    # NOTE(review): the default `measure=kl_divergence_dist` names a function\n",
    "    # defined in a LATER cell, so this cell fails with NameError under\n",
    "    # Restart & Run All; the distance functions should be moved above it.\n",
    "    def __init__(self,\n",
    "        tokenizer=global_tokenizer,\n",
    "        model=global_model,\n",
    "        measure=kl_divergence_dist):\n",
    "        self.tokenizer = tokenizer\n",
    "        self.model = model\n",
    "        self.measure = measure\n",
    "    \n",
    "    def consume_question_pair(self, question_pair):\n",
    "        \"\"\"Score one batch of question pairs; returns measure's result.\"\"\"\n",
    "        # [B(atch), L(ength of sentence)]\n",
    "        context = question_pair[\"label\"]\n",
    "        unmasked = question_pair[\"unmasked\"]\n",
    "        masked = question_pair[\"masked\"]\n",
    "        # [B(atch), n(umber of missing tokens)]\n",
    "        missing_indices = question_pair[\"miss_id\"]\n",
    "        masked_indices = question_pair[\"mask_id\"]\n",
    "        # u_pred = consume_question(unmasked, context)\n",
    "        # m_pred = consume_question(masked, unmasked)\n",
    "        # [B(atch), L(ength of sentence), V(ocabulary size)]\n",
    "        u_logits = self.model(input_ids=unmasked).logits\n",
    "        m_logits = self.model(input_ids=masked).logits\n",
    "\n",
    "        # Token ids of the ground-truth tokens at the missing positions.\n",
    "        missing_label_ids = torch.gather(context, 1, missing_indices) # [B, n]\n",
    "        answer_shape = list(missing_indices.shape)\n",
    "        answer_shape.append(u_logits.shape[2])\n",
    "        # Broadcast both index tensors to [B, n, V] for gather/scatter below.\n",
    "        missing_indices = missing_indices.unsqueeze(2).expand(answer_shape) # [B, n, V]\n",
    "        missing_label_ids = missing_label_ids.unsqueeze(2).expand(answer_shape) # [B, n, V]\n",
    "        \n",
    "        ones_template = torch.tensor([[[1.]]]).expand(answer_shape) # [B, n, V]\n",
    "        # golden logits ,g_logits[b][n][index[b][n]] = 1\n",
    "        # NOTE(review): torch.zeros/torch.tensor allocate on CPU here; if the\n",
    "        # gathered index tensors live on GPU this may raise a device-mismatch\n",
    "        # error -- confirm where the DataLoader tensors reside.\n",
    "        g_logits = torch.scatter(torch.zeros(answer_shape), 2, missing_label_ids, ones_template)\n",
    "        # unmasked logits, restricted to the missing positions\n",
    "        u_logits = torch.gather(u_logits, 1, missing_indices)\n",
    "        # masked logits, restricted to the missing positions\n",
    "        m_logits = torch.gather(m_logits, 1, missing_indices)\n",
    "        \n",
    "        return self.measure(m_logits, u_logits, g_logits)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5b940e2c",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_batch_size(batch):\n",
    "    for value in batch.values():\n",
    "        return value.shape[0]\n",
    "\n",
    "class SaveManager:\n",
    "    \"\"\"Buffers relation records and sentence contexts, periodically dumping\n",
    "    them to numbered files and recording progress for resumption.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self,\n",
    "        dump_dir=DUMP_DIR,\n",
    "        counter=0,\n",
    "        log_interval=100,\n",
    "        save_interval=500):\n",
    "        self.sentence_dict = {}\n",
    "        self.relation_list = []\n",
    "        self.log_interval = log_interval\n",
    "        self.save_interval = save_interval\n",
    "        # Round down to the last completed save boundary.\n",
    "        self.counter = counter - counter % save_interval\n",
    "        self.dump_dir = dump_dir\n",
    "        self.progress_path = os.path.join(self.dump_dir, \"progress.log\")\n",
    "        self.rel_template = os.path.join(dump_dir, \"relation_list_cnt_{}.dump\")\n",
    "        self.stc_template = os.path.join(dump_dir, \"sentence_dict_cnt_{}.dump\")\n",
    "\n",
    "    def load_progress(self, resume=False):\n",
    "        \"\"\"Return the (file_id, stc_id) position to start from.\n",
    "\n",
    "        Fix: the original had an unconditional `return (0, 0)` that left the\n",
    "        real loading logic unreachable dead code; that behavior is kept as\n",
    "        the default, and the logic is now reachable via resume=True.\n",
    "        \"\"\"\n",
    "        if resume and os.path.exists(self.progress_path):\n",
    "            with open(self.progress_path, \"r\") as p_log:\n",
    "                progress = json.load(p_log)\n",
    "                self.save_interval = progress[\"save_interval\"]\n",
    "                self.counter = progress[\"counter\"]\n",
    "                return (progress[\"file_id\"], progress[\"stc_id\"])\n",
    "        return (0, 0)\n",
    "\n",
    "    def dump_progress(self, file_id, stc_id):\n",
    "        # Persist enough state to resume after a crash.\n",
    "        with open(self.progress_path, \"w\") as p_log:\n",
    "            progress = {\n",
    "                \"file_id\": file_id,\n",
    "                \"stc_id\": stc_id,\n",
    "                \"counter\": self.counter,\n",
    "                \"save_interval\": self.save_interval\n",
    "            }\n",
    "            p_log.write(json.dumps(progress))\n",
    "\n",
    "    def save_sentence_list(self):\n",
    "        \"\"\"Dump buffered sentences (sorted by id), one JSON object per line.\"\"\"\n",
    "        # Fix: iterating a dict yields keys only -- use .items(); the original\n",
    "        # also appended to a nonexistent self.sentence_list attribute.\n",
    "        sentence_list = [\n",
    "            {\"id\": context_id, \"context\": raw_tokens}\n",
    "            for context_id, raw_tokens in self.sentence_dict.items()\n",
    "        ]\n",
    "        sentence_list.sort(key=lambda x: x[\"id\"])\n",
    "        save_path = self.stc_template.format(self.counter)\n",
    "        with open(save_path, \"w\") as f:\n",
    "            for sentence in sentence_list:\n",
    "                f.write(json.dumps(sentence)+\"\\n\")\n",
    "\n",
    "    def update_sentence(self, sentence, context_id):\n",
    "        # Remember the raw subword tokens for this context id.\n",
    "        self.sentence_dict[context_id] = sentence[\"raw\"]\n",
    "\n",
    "    def save_relation_list(self):\n",
    "        \"\"\"Dump buffered relations, one JSON object per line.\"\"\"\n",
    "        save_path = self.rel_template.format(self.counter)\n",
    "        with open(save_path, \"w\") as f:\n",
    "            for relation in self.relation_list:\n",
    "                f.write(json.dumps(relation)+\"\\n\")\n",
    "\n",
    "    def update_relation(self, sample, distance, context_id):\n",
    "        \"\"\"Record one scored pair; log and dump at the configured intervals.\"\"\"\n",
    "        self.relation_list.append({\n",
    "            \"context\": context_id,\n",
    "            \"missing_index\": sample[\"miss_id\"].tolist(),\n",
    "            \"masked_index\": sample[\"mask_id\"].tolist(),\n",
    "            \"distance\": float(distance)\n",
    "        })\n",
    "        self.counter += 1\n",
    "        if self.counter % self.log_interval == 0:\n",
    "            print(\"Got example count: \", self.counter)\n",
    "        if self.counter % self.save_interval == 0:\n",
    "            print(\"Save examples.\")\n",
    "            # Fix: these were bare function calls (NameError at runtime);\n",
    "            # they are methods and need the self. prefix.\n",
    "            self.save_relation_list()\n",
    "            self.save_sentence_list()\n",
    "            self.relation_list = []\n",
    "            self.sentence_dict = {}\n",
    "\n",
    "    def update_relation_batched(self, batch, distance, context_id):\n",
    "        \"\"\"Split a batched sample dict into per-example dicts and record each.\"\"\"\n",
    "        batch_size = get_batch_size(batch)\n",
    "        for index in range(0, batch_size):\n",
    "            relation = {key: batched_tensor[index] for key, batched_tensor in batch.items()}\n",
    "            self.update_relation(relation, distance[index], context_id)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bff0f8dc",
   "metadata": {},
   "outputs": [],
   "source": [
    "def main():\n",
    "    \"\"\"Run the (currently truncated) scoring pipeline.\n",
    "\n",
    "    Processes only the first batch of the first usable sentence and returns\n",
    "    that batch so downstream cells can inspect it; returning here is\n",
    "    equivalent to the original double-`break` debugging truncation.\n",
    "    \"\"\"\n",
    "    # Fix: removed unused locals (sentence_list, relation_list, log_interval).\n",
    "    save_manager = SaveManager(save_interval=500)\n",
    "    last_file_id, last_stc_id = save_manager.load_progress()\n",
    "\n",
    "    sentence_dataset = SentenceIterable(file_id=last_file_id, stc_id=last_stc_id)\n",
    "    consumer = QuestionPairConsumer()\n",
    "\n",
    "    for sentence, file_id, stc_id in sentence_dataset:\n",
    "        # Unique id per sentence; assumes < 50000 sentences per file -- TODO confirm.\n",
    "        context_id = file_id * 50000 + stc_id\n",
    "        question_pair_dataset = QuestionPairIterable(sentence)\n",
    "        dataloader = DataLoader(question_pair_dataset, batch_size=32, num_workers=0)\n",
    "        for sample_batched in dataloader:\n",
    "            distance = consumer.consume_question_pair(sample_batched)\n",
    "            save_manager.update_sentence(sentence, context_id)\n",
    "            save_manager.update_relation_batched(sample_batched, distance, context_id)\n",
    "            save_manager.dump_progress(file_id, stc_id)\n",
    "            # Fix: main() previously returned None, which crashed the\n",
    "            # `sample = main()` cell below; return the first batch instead.\n",
    "            return sample_batched\n",
    "    return None"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "66b39e02",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Kick off the pipeline (currently processes a single batch -- see main()).\n",
    "main()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "daaab200",
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): this cell requires main() to return the first sample batch;\n",
    "# as originally written main() fell off the end and returned None, so the\n",
    "# subscripting below raised TypeError -- confirm main()'s return contract.\n",
    "sample = main()\n",
    "label = sample[\"label\"]\n",
    "unmasked = sample[\"unmasked\"]\n",
    "masked = sample[\"masked\"]\n",
    "print(sample)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a3a0771c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): consume_question_pair returns a single value (the measure\n",
    "# result), so unpacking it into three names only works if that value happens\n",
    "# to have a first dimension of 3 -- confirm the intended return contract.\n",
    "consumer = QuestionPairConsumer()\n",
    "g_logits, u_logits, m_logits = consumer.consume_question_pair(sample)\n",
    "print(g_logits, u_logits, m_logits)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "293bbd0b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# result: masked; target: unmasked index: golden\n",
    "def index_only_dist(result, target, index):\n",
    "    n_dim = 1\n",
    "    v_dim = 2\n",
    "    return torch.mean(\n",
    "        torch.sum(\n",
    "            F.relu(target - result) * index, dim=v_dim\n",
    "        ),\n",
    "        dim=n_dim\n",
    "    )\n",
    "\n",
    "def kl_divergence_dist(result, target, index):\n",
    "    n_dim = 1\n",
    "    v_dim = 2\n",
    "    return torch.mean(\n",
    "        torch.sum(\n",
    "            F.softmax(target, dim=v_dim) * ( - F.log_softmax(result, dim=v_dim) + \n",
    "            F.log_softmax(target, dim=v_dim)),\n",
    "            dim=v_dim\n",
    "        ),\n",
    "        dim=n_dim\n",
    "    )\n",
    "\n",
    "def cross_entropy_dist(result, target, index):\n",
    "    n_dim = 1\n",
    "    v_dim = 2\n",
    "    return torch.mean(\n",
    "        torch.sum(\n",
    "            - F.softmax(target, dim=v_dim) * F.log_softmax(result, dim=v_dim),\n",
    "            dim=v_dim\n",
    "        ),\n",
    "        dim=n_dim\n",
    "    )\n",
    "\n",
    "# NOTE(review): u_logits/m_logits/g_logits come from the earlier consumer\n",
    "# cell; the argument order here (result=u_logits, target=m_logits) is the\n",
    "# reverse of QuestionPairConsumer, which calls measure(m_logits, u_logits,\n",
    "# g_logits) -- confirm which direction of the asymmetric measures is intended.\n",
    "print(index_only_dist(u_logits, m_logits, g_logits))\n",
    "print(kl_divergence_dist(u_logits, m_logits, g_logits))\n",
    "print(cross_entropy_dist(u_logits, m_logits, g_logits))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "e86500e1",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Example sentences used by the tokenizer/model experiments below.\n",
    "hamlet = \"To be or not to be, that is the question.\"\n",
    "hysteria = \"a psychoneurosis marked by emotional excitability and disturbances of \" \\\n",
    "    \"the psychogenic, sensory, vasomotor, and visceral functions\"\n",
    "village = \"The statistical area Berkhout which also can include the peripheral \" \\\n",
    "    \"parts of the village as well as the surrounding countryside has a population of around 1780.\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "50882aa5",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Fresh tokenizer/encoder instances for the experiments below, kept separate\n",
    "# from the global_model used by the scoring pipeline.\n",
    "tokenizer_to_train = BertTokenizer.from_pretrained('bert-base-uncased')\n",
    "model_to_train = BertModel.from_pretrained('bert-base-uncased')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "26d43028",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Encode one example sentence and run a forward pass through the encoder.\n",
    "inputs = tokenizer_to_train(hamlet, return_tensors=\"pt\")\n",
    "outputs = model_to_train(**inputs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d97ded4a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Inspect which output fields the encoder returned.\n",
    "print(outputs.keys())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8ac6b7ed",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Minimal sequence-classification fine-tuning setup.\n",
    "# NOTE(review): imports are scattered mid-notebook (they belong in the top\n",
    "# imports cell), and transformers.AdamW is deprecated in newer transformers\n",
    "# releases in favor of torch.optim.AdamW -- confirm the installed version.\n",
    "import torch\n",
    "from transformers import BertForSequenceClassification\n",
    "model = BertForSequenceClassification.from_pretrained('bert-base-uncased')\n",
    "model.train()\n",
    "from transformers import AdamW\n",
    "optimizer = AdamW(model.parameters(), lr=1e-5)\n",
    "from transformers import BertTokenizer\n",
    "tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n",
    "text_batch = [\"I love Pixar.\", \"I don't care for Pixar.\"]\n",
    "encoding = tokenizer(text_batch, return_tensors='pt', padding=True, truncation=True)\n",
    "input_ids = encoding['input_ids']\n",
    "attention_mask = encoding['attention_mask']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8ef37e6d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# One optimization step on the two-example batch.\n",
    "labels = torch.tensor([1,0]).unsqueeze(0)\n",
    "outputs = model(input_ids, attention_mask=attention_mask, labels=labels)\n",
    "loss = outputs.loss\n",
    "# Fix: clear previously accumulated gradients before backward(); without\n",
    "# this, re-running the cell accumulates gradients across steps.\n",
    "optimizer.zero_grad()\n",
    "loss.backward()\n",
    "optimizer.step()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9ffaccff",
   "metadata": {},
   "outputs": [],
   "source": [
    "import nltk\n",
    "from nltk.tokenize import word_tokenize\n",
    "from nltk.tag import pos_tag"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "c6501f52",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the small English spaCy pipeline; NER and lemmatizer are excluded\n",
    "# because only tagging/parsing (noun chunks) are used below.\n",
    "import spacy\n",
    "nlp = spacy.load(\"en_core_web_sm\", exclude=['ner','lemmatizer'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "e800d8a5",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['tok2vec', 'tagger', 'parser', 'attribute_ruler']\n",
      "\u001b[1m\n",
      "============================= Pipeline Overview =============================\u001b[0m\n",
      "\n",
      "#   Component         Assigns               Requires   Scores             Retokenizes\n",
      "-   ---------------   -------------------   --------   ----------------   -----------\n",
      "0   tok2vec           doc.tensor                                          False      \n",
      "                                                                                     \n",
      "1   tagger            token.tag                        tag_acc            False      \n",
      "                                                                                     \n",
      "2   parser            token.dep                        dep_uas            False      \n",
      "                      token.head                       dep_las                       \n",
      "                      token.is_sent_start              dep_las_per_type              \n",
      "                      doc.sents                        sents_p                       \n",
      "                                                       sents_r                       \n",
      "                                                       sents_f                       \n",
      "                                                                                     \n",
      "3   attribute_ruler                                                       False      \n",
      "\n",
      "\u001b[38;5;2m✔ No problems found.\u001b[0m\n",
      "{'summary': {'tok2vec': {'assigns': ['doc.tensor'], 'requires': [], 'scores': [], 'retokenizes': False}, 'tagger': {'assigns': ['token.tag'], 'requires': [], 'scores': ['tag_acc'], 'retokenizes': False}, 'parser': {'assigns': ['token.dep', 'token.head', 'token.is_sent_start', 'doc.sents'], 'requires': [], 'scores': ['dep_uas', 'dep_las', 'dep_las_per_type', 'sents_p', 'sents_r', 'sents_f'], 'retokenizes': False}, 'attribute_ruler': {'assigns': [], 'requires': [], 'scores': [], 'retokenizes': False}}, 'problems': {'tok2vec': [], 'tagger': [], 'parser': [], 'attribute_ruler': []}, 'attrs': {'doc.sents': {'assigns': ['parser'], 'requires': []}, 'doc.tensor': {'assigns': ['tok2vec'], 'requires': []}, 'token.tag': {'assigns': ['tagger'], 'requires': []}, 'token.head': {'assigns': ['parser'], 'requires': []}, 'token.is_sent_start': {'assigns': ['parser'], 'requires': []}, 'token.dep': {'assigns': ['parser'], 'requires': []}}}\n",
      "a psychoneurosis marked by emotional excitability and disturbances of the psychogenic, sensory, vasomotor, and visceral functions\n",
      "0 2\n",
      "4 6\n",
      "7 8\n",
      "9 19\n"
     ]
    }
   ],
   "source": [
    "# Inspect the pipeline and print noun-chunk spans for the example sentence.\n",
    "print(nlp.pipe_names)\n",
    "print(nlp.analyze_pipes(pretty=True))\n",
    "doc = nlp(hysteria)\n",
    "print(doc)\n",
    "# Fix: the loop variable was named `np`, shadowing the numpy alias imported\n",
    "# at the top of the notebook for every subsequent cell in this kernel.\n",
    "for chunk in doc.noun_chunks:\n",
    "    print(chunk.start, chunk.end)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "39ac7c63",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Rewrite filelist.txt entries as paths relative to the wikipedia/text dir.\n",
    "import re\n",
    "pattern = re.compile(\"wikipedia/text/(.*)$\")\n",
    "with open(\"filelist.txt\") as f, open(\"new_filelist.txt\", \"w\") as fw:\n",
    "    for line in f:\n",
    "        # Fix: pattern.search() returns None for non-matching lines, which\n",
    "        # made .group(1) raise AttributeError; skip such lines instead.\n",
    "        match = pattern.search(line)\n",
    "        if match is None:\n",
    "            continue\n",
    "        fw.write(match.group(1)+\"\\n\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
