{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f6f7e9f1",
   "metadata": {},
   "outputs": [],
   "source": [
    "from multiprocessing import Process"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e17398ac",
   "metadata": {},
   "outputs": [],
   "source": [
    "def f(name):\n",
    "    # Toy multiprocessing demo: print a greeting from a child process.\n",
    "    print('hello', name)\n",
    "\n",
    "# NOTE(review): under the \"spawn\" start method, functions defined in a\n",
    "# notebook cell may not be importable by the child process -- confirm this\n",
    "# cell actually works on the target platform before relying on it.\n",
    "p = Process(target=f, args=('bob',))\n",
    "p.start()\n",
    "p.join()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "eab132f9",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import re\n",
    "import time\n",
    "import argparse\n",
    "import multiprocessing\n",
    "\n",
    "import json\n",
    "from transform import *\n",
    "from utils import SaveManager, encode_context_id\n",
    "\n",
    "import torch\n",
    "import torch.nn.functional as F\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "\n",
    "from transformers import BertTokenizer, BertForMaskedLM"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0a7db825",
   "metadata": {},
   "outputs": [],
   "source": [
    "from measure import *"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2ff8930f",
   "metadata": {},
   "outputs": [],
   "source": [
    "def test_args():\n",
    "    parser = argparse.ArgumentParser()\n",
    "    parser.add_argument(\"location\", type=str)\n",
    "    parser.add_argument(\"--dump\", type=str)\n",
    "    raw_args = \"234-2 --dump 123\"\n",
    "    raw_args = raw_args.split()\n",
    "    args = parser.parse_args(raw_args)\n",
    "    print(args)\n",
    "    \n",
    "test_args()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4cc544b3",
   "metadata": {},
   "outputs": [],
   "source": [
    "global_raw_args = \"234-2 --dump playground/dump_kl_para_cased \" \\\n",
    "            \"--log 50 \"  \\\n",
    "            \"--save 0 \"  \\\n",
    "            \"--batch 4 \"  \\\n",
    "            \"--device gpu \"  \\\n",
    "            \"--memory 10 \"  \\\n",
    "            \"--model_card bert-base-cased \"\n",
    "global_raw_args = global_raw_args.split()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0bbceacb",
   "metadata": {},
   "outputs": [],
   "source": [
    "def parse_args(raw_args=None):\n",
    "    \"\"\"Parse command-line style arguments and attach resolved absolute paths.\n",
    "\n",
    "    Parameters:\n",
    "        raw_args: list of argument tokens, or None to read sys.argv.\n",
    "    Returns:\n",
    "        argparse.Namespace with the parsed flags plus work_dir, file_list,\n",
    "        wiki_dir, parsed_data_dir and dump_dir (absolute paths).\n",
    "    Side effects:\n",
    "        Creates the dump directory if it does not exist.\n",
    "    Raises:\n",
    "        SystemExit if the dump path exists but is not a directory.\n",
    "    \"\"\"\n",
    "    work_dir_dict = {\n",
    "        \"234-2\": \"/data/disk5/private/yuc/coref/bert-tagger\",\n",
    "        \"cluster\": \"/home/shiyukai/project/yuc/coref/bert-tagger\"\n",
    "    }\n",
    "    server_list = [\"234-2\", \"cluster\"]\n",
    "\n",
    "    parser = argparse.ArgumentParser(description=\"Arguments for relation generation.\")\n",
    "    parser.add_argument('location', choices=server_list,\n",
    "                        help='Indicate the server this script is running on.')\n",
    "    parser.add_argument('--dump', dest='dump_dir', required=True,\n",
    "                        help='Set directory to dump the data and progress log.'\n",
    "                        '(relative to the working directory, for example, bert-tagger/)')\n",
    "    parser.add_argument('--log', dest='log_interval', type=int, default=100,\n",
    "                        help='Set log interval for the process.')\n",
    "    parser.add_argument('--save', dest='save_interval', type=int, default=500,\n",
    "                        help='Set save interval for the process.')\n",
    "    parser.add_argument('--batch', dest='batch_size', type=int, default=0,\n",
    "                        help='Batch size used by BERT model. '\n",
    "                        '0 for dynamic batch size suited to memory.')\n",
    "    parser.add_argument('--memory', type=int, default=16,\n",
    "                        help=\"GPU memory(GB). Referred when batch size is set to dynamic(0).\")\n",
    "    parser.add_argument(\"--device\", \"-d\", choices=[\"cpu\", \"gpu\"], default=\"gpu\",\n",
    "                        help=\"Devices\")\n",
    "    parser.add_argument(\"--model_card\", default=\"bert-base-cased\")\n",
    "\n",
    "    # parser.parse_args(None) falls back to sys.argv, so no branching is needed.\n",
    "    args = parser.parse_args(raw_args)\n",
    "\n",
    "    # `location` is restricted by `choices`, so the lookup cannot fail; .get()\n",
    "    # keeps the original \"default to 234-2\" behavior regardless.\n",
    "    work_dir = work_dir_dict.get(args.location, work_dir_dict[\"234-2\"])\n",
    "    dump_dir = os.path.join(work_dir, args.dump_dir)\n",
    "    if os.path.exists(dump_dir) and not os.path.isdir(dump_dir):\n",
    "        # The original printed and called exit(); raising SystemExit is safer\n",
    "        # because exit() is only guaranteed in interactive sessions.\n",
    "        raise SystemExit(\"Error: dump path refer to a non-directory.\")\n",
    "    os.makedirs(dump_dir, exist_ok=True)\n",
    "\n",
    "    # Attach the resolved paths directly instead of round-tripping them\n",
    "    # through a second ArgumentParser as the original did.\n",
    "    path_attrs = {\n",
    "        \"work_dir\": work_dir,\n",
    "        \"file_list\": os.path.join(work_dir, \"playground/filelist.txt\"),\n",
    "        \"wiki_dir\": os.path.join(work_dir, \"../wikipedia/text\"),\n",
    "        \"parsed_data_dir\": os.path.join(work_dir, \"../wikipedia/parsed-text\"),\n",
    "        \"dump_dir\": dump_dir,\n",
    "    }\n",
    "    for name, value in path_attrs.items():\n",
    "        setattr(args, name, value)\n",
    "\n",
    "    return args\n",
    "\n",
    "global_args = parse_args(global_raw_args)\n",
    "print(global_args)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f624894d",
   "metadata": {},
   "outputs": [],
   "source": [
    "global_device = torch.device(\"cpu\")\n",
    "# Consistency fix: use the model card chosen via --model_card (see\n",
    "# parse_args) instead of hardcoding \"bert-base-cased\" a second time.\n",
    "global_tokenizer = BertTokenizer.from_pretrained(global_args.model_card)\n",
    "global_model = BertForMaskedLM.from_pretrained(global_args.model_card)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "264cc789",
   "metadata": {},
   "outputs": [],
   "source": [
    "class ParsedSentenceIterable:\n",
    "    def __init__(self, args,\n",
    "        file_id=0,\n",
    "        stc_id=0,\n",
    "        transform=None,\n",
    "        device=None):\n",
    "        self.parsed_data_dir = args.parsed_data_dir\n",
    "        self.file_id = file_id\n",
    "        self.stc_id = stc_id\n",
    "        self.transform = transform\n",
    "        self.device = device\n",
    "        self.path_to_file_list = os.path.join(self.parsed_data_dir, \"filelist.txt\")\n",
    "        if not os.path.exists(self.path_to_file_list):\n",
    "            file_list = []\n",
    "            for root, dirs, files in os.walk(self.parsed_data_dir):\n",
    "                for filename in files:\n",
    "                    if filename[-4:] == \"dump\":\n",
    "                        file_list.append(filename)\n",
    "            pattern = re.compile(\"([1-9][0-9]*).dump\")\n",
    "            def sort_key(x):\n",
    "                return eval(pattern.search(x).group(1))\n",
    "            file_list.sort(key=sort_key)\n",
    "            with open(self.path_to_file_list, \"w\") as fl:\n",
    "                for filename in file_list:\n",
    "                    fl.write(filename + \"\\n\") \n",
    "        # TODO separate filelist.txt for different process\n",
    "\n",
    "    def __iter__(self):\n",
    "        return self.sentence_generator()\n",
    "\n",
    "    def sentence_generator(self):\n",
    "        with open(self.path_to_file_list, \"r\") as f_list:\n",
    "            for file_id, file_path in enumerate(f_list):\n",
    "                if file_id < self.file_id:\n",
    "                    continue\n",
    "                file_path = os.path.join(self.parsed_data_dir, file_path)\n",
    "                with open(file_path.strip()) as fs:\n",
    "                    for stc_id, line in enumerate(fs):\n",
    "                        # { \"sentence\", \"file_id\", \"sent_id\", \"np_list\" }\n",
    "                        sentence = json.loads(line.strip())\n",
    "                        if self.transform != None:\n",
    "                            sentence = self.transform(sentence)\n",
    "                        if sentence != None:\n",
    "                            yield (sentence, file_id, stc_id)\n",
    "                            \n",
    "def test_sentence_iterable(args):\n",
    "    \"\"\"Print and return the first transformed sentence tuple, or None if empty.\"\"\"\n",
    "    transform = IndexMappingTransform(global_device, global_tokenizer)\n",
    "    sent_dataset = ParsedSentenceIterable(args, transform=transform, device=global_device)\n",
    "    iterator = iter(sent_dataset)\n",
    "    try:\n",
    "        first = next(iterator)\n",
    "    except StopIteration:\n",
    "        return None\n",
    "    print(first)\n",
    "    return first\n",
    "\n",
    "global_sent = test_sentence_iterable(global_args)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3f8c7a82",
   "metadata": {},
   "outputs": [],
   "source": [
    "class QuestionPairIterable(Dataset):\n",
    "    def __init__(self, \n",
    "        sentence,\n",
    "        mask_placeholder=\"[MASK]\",\n",
    "        miss_placeholder=\"[MASK]\",\n",
    "        device=None, tokenizer=None):\n",
    "        super(QuestionPairIterable).__init__()\n",
    "        self.sentence = sentence[\"tokens\"]\n",
    "        self.miss_ph = miss_placeholder\n",
    "        self.mask_ph = mask_placeholder\n",
    "        # TODO is this tokenizer argument needed?\n",
    "        self.tokenizer = tokenizer\n",
    "        self.miss_id = self.tokenizer.convert_tokens_to_ids(miss_placeholder)\n",
    "        self.mask_id = self.tokenizer.convert_tokens_to_ids(mask_placeholder)\n",
    "        self.device = device\n",
    "        self.index_pairs = self.generate_index_pairs()\n",
    "        self.start = 0\n",
    "        self.end = len(self.index_pairs)\n",
    "    \n",
    "    def generate_index_pairs(self):\n",
    "        length = len(self.sentence)\n",
    "        return [\n",
    "            ([miss_index], [1], [mask_index], [1])\n",
    "            for miss_index in range(1, length-1)\n",
    "                for mask_index in range(1, length-1)\n",
    "                    if miss_index != mask_index\n",
    "        ]\n",
    "    \n",
    "    def __len__(self):\n",
    "        return len(self.index_pairs)\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        missing_indices, miss_idx_mask, masked_indices, mask_idx_mask = self.index_pairs[index]\n",
    "        # unmasked_question = list(self.sentence)\n",
    "        unmasked_question = self.sentence.clone()\n",
    "        for missing_index in missing_indices:\n",
    "            # unmasked_question[missing_index] = self.miss_ph\n",
    "            unmasked_question[missing_index] = self.miss_id\n",
    "        # masked_question = list(unmasked_question)\n",
    "        masked_question = unmasked_question.clone()\n",
    "        for masked_index in masked_indices:\n",
    "            # masked_question[masked_index] = self.mask_ph\n",
    "            masked_question[masked_index] = self.mask_id\n",
    "\n",
    "        # this is the format of a question pair.\n",
    "        return {\n",
    "            \"label\": self.sentence,\n",
    "            \"unmasked\": unmasked_question, \n",
    "            \"masked\": masked_question, \n",
    "            \"miss_id\": torch.tensor(missing_indices, device=self.device),\n",
    "            \"mask_id\": torch.tensor(masked_indices, device=self.device),\n",
    "            \"miss_mask\": torch.tensor(miss_idx_mask, device=self.device),\n",
    "            # \"mask_mask\": torch.tensor(mask_idx_mask, device=self.device),\n",
    "        }\n",
    "    \n",
    "    def optimized_collate_fn(self, examples):\n",
    "        questions = []\n",
    "        miss_ids = []\n",
    "        # last_miss_id = None\n",
    "        last_unmasked_ptr = 0\n",
    "        for example in examples:\n",
    "            miss_id = example[\"miss_id\"]\n",
    "            if len(miss_ids) > 0:\n",
    "                print(miss_id, miss_id == miss_ids[-1])\n",
    "            # if last_miss_id == None or not torch.all(miss_id == last_miss_id):\n",
    "            if len(miss_ids)==0 or not torch.all(miss_id == miss_ids[-1]):\n",
    "                # add new question\n",
    "                # last_miss_id = miss_id\n",
    "                questions.append(example[\"unmasked\"])\n",
    "                miss_ids.append(miss_id)\n",
    "                example[\"unmasked\"] = torch.tensor(len(questions)-1, device=self.device)\n",
    "                last_unmasked_ptr = torch.tensor(len(questions)-1, device=self.device)\n",
    "            else: # point to old question\n",
    "                example[\"unmasked\"] = last_unmasked_ptr\n",
    "            questions.append(example[\"masked\"])\n",
    "            miss_ids.append(miss_id)\n",
    "            example[\"masked\"] = torch.tensor(len(questions)-1, device=self.device)\n",
    "        batch = { key: torch.stack([ ele[key] for ele in examples ]) for key in examples[0] if key != \"label\" }\n",
    "        batch[\"label\"] = examples[0][\"label\"]\n",
    "        batch[\"question_batch\"] = torch.stack(questions)\n",
    "        batch[\"miss_id_batch\"] = torch.stack(miss_ids)\n",
    "        return batch\n",
    "        \n",
    "class NounQuestionPairIterable(QuestionPairIterable):\n",
    "    \"\"\"Question pairs restricted to the noun-phrase spans in sentence['np_list'].\"\"\"\n",
    "\n",
    "    def __init__(self, sentence, **args):\n",
    "        # np_list must be set before the base __init__ calls generate_index_pairs().\n",
    "        self.np_list = sentence[\"np_list\"]\n",
    "        super().__init__(sentence, **args)\n",
    "\n",
    "    def generate_index_pairs(self):\n",
    "        pairs = []\n",
    "        for miss_idx, miss_mask in self.np_list:\n",
    "            for mask_idx, mask_mask in self.np_list:\n",
    "                if miss_idx != mask_idx:\n",
    "                    pairs.append((miss_idx, miss_mask, mask_idx, mask_mask))\n",
    "        return pairs\n",
    "\n",
    "def expected_batch_size(length, args):\n",
    "    if args.batch_size == 0:\n",
    "        return min((220 // length), 4) * args.memory\n",
    "    else:\n",
    "        return args.batch_size\n",
    "    \n",
    "def test_question_pair_data(sent):\n",
    "    \"\"\"Build a NounQuestionPairIterable and return its first collated batch.\"\"\"\n",
    "    dataset = NounQuestionPairIterable(sent, device=global_device, tokenizer=global_tokenizer)\n",
    "    dataloader = DataLoader(\n",
    "        dataset,\n",
    "        batch_size=4,\n",
    "        collate_fn=dataset.optimized_collate_fn,\n",
    "        num_workers=0,\n",
    "    )\n",
    "    return next(iter(dataloader), None)\n",
    "\n",
    "global_batch = test_question_pair_data(global_sent[0])\n",
    "print(global_batch)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "80f7219b",
   "metadata": {},
   "outputs": [],
   "source": [
    "class CompressedQuestionPairConsumer:\n",
    "    \"\"\"Runs the masked LM on a deduplicated question batch and applies\n",
    "    `measure` to compare masked vs. unmasked predictions at the missing\n",
    "    token positions (batch layout produced by optimized_collate_fn).\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, tokenizer, model, measure, device):\n",
    "        # NOTE(review): tokenizer is stored but never used in this class.\n",
    "        self.tokenizer = tokenizer\n",
    "        self.model = model\n",
    "        self.measure = measure\n",
    "        self.device = device\n",
    "  \n",
    "    def consume_question_pair(self, question_pair):\n",
    "        \"\"\"Score one collated batch; returns whatever `measure` returns for\n",
    "        (masked logits, unmasked logits, golden one-hot logits, miss mask).\n",
    "        \"\"\"\n",
    "        # [Q(uestion count deduplicated), L(ength of sentence)]\n",
    "        questions = question_pair[\"question_batch\"]        \n",
    "        # [Q, n(umber of missing tokens)]\n",
    "        missing_indices = question_pair[\"miss_id_batch\"]\n",
    "        # [B(atch of question pairs), n]\n",
    "        miss_idx_mask = question_pair[\"miss_mask\"]\n",
    "        # [B] -- integer pointers into the deduplicated question rows\n",
    "        unmasked = question_pair[\"unmasked\"]\n",
    "        masked = question_pair[\"masked\"]\n",
    "        question_num = questions.shape[0]\n",
    "        # [L(ength of sentence)]\n",
    "        context = question_pair[\"label\"]\n",
    "        # [Q, L] -- the label sentence repeated for every question row\n",
    "        context = context.unsqueeze(0).expand(question_num, -1)\n",
    "\n",
    "        # [Q, L, V(ocabulary size)]\n",
    "        logits = self.model(input_ids=questions).logits\n",
    "        missing_label_ids = torch.gather(context, 1, missing_indices) # [Q, n]\n",
    "        answer_shape = list(missing_indices.shape)\n",
    "        answer_shape.append(logits.shape[2])\n",
    "        missing_indices = missing_indices.unsqueeze(2).expand(answer_shape) # [Q, n, V]\n",
    "        missing_label_ids = missing_label_ids.unsqueeze(2).expand(answer_shape) # [Q, n, V]\n",
    "        ones_template = torch.tensor([[[1.]]], device=self.device).expand(answer_shape) # [Q, n, V]\n",
    "        # golden logits: g_logits[q][i][missing_label_ids[q][i]] = 1, [Q, n, V]\n",
    "        g_logits = torch.scatter(torch.zeros(answer_shape, device=self.device), 2, missing_label_ids, ones_template)\n",
    "        # predicted logits at the missing positions, [Q, n, V]\n",
    "        p_logits = torch.gather(logits, 1, missing_indices)\n",
    "        # unmasked logits [B, n, V]\n",
    "        u_logits = torch.index_select(p_logits, 0, unmasked)\n",
    "        # masked logits [B, n, V]\n",
    "        m_logits = torch.index_select(p_logits, 0, masked)\n",
    "        return self.measure(m_logits, u_logits, g_logits, miss_idx_mask)\n",
    "    \n",
    "def test_consumer(batch):\n",
    "    \"\"\"Run the JS-divergence consumer on one batch and return the distances.\"\"\"\n",
    "    consumer = CompressedQuestionPairConsumer(\n",
    "        global_tokenizer, global_model, js_divergence_dist, global_device)\n",
    "    return consumer.consume_question_pair(batch)\n",
    "\n",
    "global_result = test_consumer(global_batch)\n",
    "print(global_result)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a1fa58db",
   "metadata": {},
   "outputs": [],
   "source": [
    "def run_tagging(args):\n",
    "    \"\"\"End-to-end pass: load model/tokenizer, resume progress, score question\n",
    "    pairs for each parsed sentence and hand results to the SaveManager.\n",
    "\n",
    "    NOTE(review): the inner `return distance` exits after the very first batch\n",
    "    of the first sentence -- this looks like notebook debugging; confirm\n",
    "    before using this function for a full run.\n",
    "    \"\"\"\n",
    "    print(\"Global initialization started.\")\n",
    "    device = torch.device('cpu')\n",
    "    # Fix: `== True` comparison replaced with the idiomatic truth test.\n",
    "    if args.device == \"gpu\" and torch.cuda.is_available():\n",
    "        device = torch.device('cuda')\n",
    "    if args.location == \"cluster\":\n",
    "        # The cluster loads from a local pretrained directory (no downloads).\n",
    "        PRETRAINED_PATH = os.path.join(args.work_dir, \"pretrained_models\", args.model_card)\n",
    "        MODEL_PATH = os.path.join(PRETRAINED_PATH, \"model\")\n",
    "        TOKENIZER_PATH = os.path.join(PRETRAINED_PATH, \"tokenizer\")\n",
    "        tokenizer = BertTokenizer.from_pretrained(TOKENIZER_PATH, local_files_only=True)\n",
    "        model = BertForMaskedLM.from_pretrained(MODEL_PATH, local_files_only=True).to(device)\n",
    "    else:\n",
    "        tokenizer = BertTokenizer.from_pretrained(args.model_card)\n",
    "        model = BertForMaskedLM.from_pretrained(args.model_card).to(device)\n",
    "    print(\"Global initialization completed.\")\n",
    "\n",
    "    save_manager = SaveManager(\n",
    "        dump_dir=args.dump_dir,\n",
    "        log_interval=args.log_interval,\n",
    "        save_interval=args.save_interval)\n",
    "    last_file_id, last_stc_id = save_manager.load_progress()\n",
    "\n",
    "    transform = IndexMappingTransform(device, tokenizer)\n",
    "    sentence_dataset = ParsedSentenceIterable(\n",
    "        args,\n",
    "        file_id=last_file_id,\n",
    "        stc_id=last_stc_id,\n",
    "        transform=transform,\n",
    "        device=device)\n",
    "    consumer = CompressedQuestionPairConsumer(\n",
    "        tokenizer=tokenizer, model=model, measure=js_divergence_dist,\n",
    "        device=device)\n",
    "    save_manager.start_watch()\n",
    "    for sentence, file_id, stc_id in sentence_dataset:\n",
    "        context_id = encode_context_id(file_id, stc_id)\n",
    "        question_pair_dataset = NounQuestionPairIterable(sentence, device=device, tokenizer=tokenizer)\n",
    "        # assumes the transform adds a \"raw\" token list -- TODO confirm against\n",
    "        # IndexMappingTransform\n",
    "        length = len(sentence[\"raw\"])\n",
    "        dataloader = DataLoader(question_pair_dataset,\n",
    "            batch_size=expected_batch_size(length, args),\n",
    "            collate_fn=question_pair_dataset.optimized_collate_fn,\n",
    "            num_workers=0)\n",
    "        for sample_batched in dataloader:\n",
    "            distance = consumer.consume_question_pair(sample_batched)\n",
    "            save_manager.update_sentence(sentence, context_id)\n",
    "            save_manager.update_relation_batched(sample_batched, distance, context_id)\n",
    "            return distance\n",
    "\n",
    "run_tagging(global_args)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "87b59569",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Inspect the shapes of all parameter tensors in the loaded model.\n",
    "for weight in global_model.parameters():\n",
    "    print(weight.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c59c9428",
   "metadata": {},
   "outputs": [],
   "source": [
    "print(model)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0868c206",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.9  (conda)",
   "language": "python",
   "name": "pythonjvsc74a57bd0ed789356564aca5994f801776aa8fac70745a610a01bf29c141cd7ff6dd8c909"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
