{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import re\n",
    "import time\n",
    "import random\n",
    "import logging\n",
    "import argparse\n",
    "\n",
    "import numpy as np\n",
    "\n",
    "import json\n",
    "\n",
    "import torch\n",
    "from torch import optim\n",
    "import torch.nn.functional as F\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "\n",
    "from transformers import BertTokenizer, BertModel, BertConfig"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def parse_args(args=None):\n",
    "    \"\"\"Build the argument parser and parse ``args``.\n",
    "\n",
    "    :param args: optional list of argument strings; when None, argparse\n",
    "        falls back to sys.argv[1:] (documented argparse behavior).\n",
    "    :return: argparse.Namespace with the options below.\n",
    "    \"\"\"\n",
    "    parser = argparse.ArgumentParser()\n",
    "    ## Required parameters\n",
    "    parser.add_argument(\"--output_dir\", default=None, type=str, required=True,\n",
    "                        help=\"The output directory where the model checkpoints and predictions will be written.\")\n",
    "    # Data & run modes\n",
    "    parser.add_argument(\"--train_dir\", default=None, type=str,\n",
    "                        help=\"Path to data for training.\")\n",
    "    parser.add_argument(\"--predict_dir\", default=None, type=str,\n",
    "                        help=\"Path to data for prediction.\")\n",
    "    parser.add_argument(\"--do_train\", action='store_true', help=\"Whether to run training.\")\n",
    "    parser.add_argument(\"--do_predict\", action='store_true', help=\"Whether to run eval on the dev set.\")\n",
    "    parser.add_argument(\"--vocab_file\", default=None, type=str, help=\"vocab file for Bert tokenizer.\")\n",
    "    # Learning\n",
    "    parser.add_argument(\"--num_train_epochs\", default=3.0, type=float,\n",
    "                        help=\"Total number of training epochs to perform.\")\n",
    "    parser.add_argument(\"--train_batch_size\", default=32, type=int, help=\"Total batch size for training.\")\n",
    "    parser.add_argument(\"--predict_batch_size\", default=8, type=int, help=\"Total batch size for predictions.\")\n",
    "    parser.add_argument(\"--learning_rate\", default=2e-4, type=float, help=\"The initial learning rate for AdamW.\")\n",
    "    parser.add_argument(\"--epsilon\", default=1e-6, type=float, help=\"Epsilon for AdamW.\")\n",
    "    # Computation limits\n",
    "    parser.add_argument(\"--max_num_candidates\", default=100, type=int,\n",
    "                        help=\"Maximum number of spans considered in an example.\")\n",
    "    parser.add_argument(\"--max_top_antecedents\", default=10, type=int,\n",
    "                        help=\"Maximum number of relations considered for each span.\")\n",
    "    # Model hyperparameters\n",
    "    parser.add_argument(\"--bert_model\", default=None, type=str, required=True,\n",
    "                        help=\"Bert pre-trained model selected in the list: bert-base-uncased, \"\n",
    "                        \"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, \"\n",
    "                        \"bert-base-multilingual-cased, bert-base-chinese.\")\n",
    "    parser.add_argument(\"--bert_config_file\", default=None, type=str,\n",
    "                        help=\"Path to the bert config file(bert_config.json, for example)\")\n",
    "    parser.add_argument(\"--dropout\", default=0.3, type=float,\n",
    "                        help=\"Dropout rate for dropout layers.\")\n",
    "    parser.add_argument(\"--ffnn_depth\", default=1, type=int,\n",
    "                        help=\"Number of hidden layers in FFNN.\")\n",
    "    parser.add_argument(\"--ffnn_size\", default=1000, type=int,\n",
    "                        help=\"Size of every hidden layer in FFNN.\")\n",
    "    # TODO This parameter should be read from bert config file.\n",
    "    parser.add_argument(\"--hidden_size\", default=768, type=int,\n",
    "                        help=\"Bert hidden size.\")\n",
    "    # Environment / misc\n",
    "    parser.add_argument(\"--no_cuda\", action='store_true',\n",
    "                        help=\"Whether not to use CUDA when available\")\n",
    "    parser.add_argument('--seed', default=42, type=int,\n",
    "                        help=\"random seed for initialization\")\n",
    "    parser.add_argument(\"--do_lower_case\", action='store_true',\n",
    "                        help=\"Whether to lower case the input text. True for uncased models, False for cased models.\")\n",
    "    parser.add_argument(\"--local_rank\", default=-1, type=int,\n",
    "                        help=\"local_rank for distributed training on gpus\")\n",
    "    parser.add_argument('--fp16', action='store_true',\n",
    "                        help=\"Whether to use 16-bit float precision instead of 32-bit\")\n",
    "    # parse_args(None) already reads sys.argv, so no branching is needed.\n",
    "    return parser.parse_args(args)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Simulate a command line so the notebook runs without real CLI arguments.\n",
    "pseudo_args = [\n",
    "    \"--output_dir\", \".\",\n",
    "    \"--bert_model\", \"bert-base-cased\",\n",
    "]\n",
    "global_args = parse_args(args=pseudo_args)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def index_to_mask(tensor, target_index_shape, target_index_mask=None):\n",
    "    \"\"\"Scatter index tuples into a dense boolean mask.\n",
    "\n",
    "    :param tensor: integer tensor [*batch, r, t]; each of the r rows is a\n",
    "        t-dimensional index into the target shape.\n",
    "    :param target_index_shape: list [k1, ..., kt] of target dimensions.\n",
    "    :param target_index_mask: optional bool tensor [*batch, r]; rows with\n",
    "        a False entry are ignored.\n",
    "    :return: bool mask [*batch, k1, ..., kt], True at every indexed\n",
    "        position (behavior pinned by the inline test below).\n",
    "    \"\"\"\n",
    "    # k1, k2, ..., kt\n",
    "    batch_dims = list(tensor.shape[:-2])\n",
    "    t = tensor.shape[-1]\n",
    "    r = tensor.shape[-2]\n",
    "    assert(len(target_index_shape) == t)\n",
    "    # One [*batch, r] index tensor per target dimension.\n",
    "    indices = torch.split(tensor, 1, dim=-1)\n",
    "    indices = [ indice.squeeze(-1) for indice in indices ]\n",
    "    # batch_dim1, batch_dim2, ..., batch_dimd, relation_dim, k1, k2, ..., kt\n",
    "    # expand_to_shape = list(a.shape[:-2]) + target_index_shape\n",
    "    # One-hot over each target dim, then move the one-hot axis before r.\n",
    "    onehots = [ F.one_hot(index, num_classes=k) for index, k in zip(indices, target_index_shape) ]\n",
    "    onehots = [ torch.transpose(onehot, -1, -2) for onehot in onehots ]\n",
    "    # tile_repeats = [ [ x if j!=i else 1 for j, x in enumerate(target_index_shape) ] for i in range(len(target_index_shape)) ]\n",
    "    # tile_repeats = [ [1,]*(len(a.shape)-2) + ele + [1,] for ele in tile_repeats ]\n",
    "    # print(tile_repeats) \n",
    "    # Broadcast every one-hot up to [*batch, k1, ..., kt, r].\n",
    "    re_shapes = [ [ 1 if j!=i else -1 for j in range(t) ] for i in range(t) ]\n",
    "    re_shapes = [ batch_dims + shape + [r,] for shape in re_shapes ]\n",
    "    expand_to_shape = batch_dims + target_index_shape + [r, ]\n",
    "    # onehots = [ torch.tile(onehot, tile_repeat) for onehot, tile_repeat in zip(onehots, tile_repeats) ]\n",
    "    onehots = [ torch.reshape(onehot, re_shape).expand(expand_to_shape) for onehot, re_shape in zip(onehots, re_shapes) ]\n",
    "    if target_index_mask != None:\n",
    "        mask_reshape = batch_dims + [1,] * t + [r, ]\n",
    "        target_index_mask = torch.reshape(target_index_mask, mask_reshape).expand(expand_to_shape)\n",
    "        onehots.append(target_index_mask)\n",
    "    # A position is set when ANY relation matches on ALL of its t indices\n",
    "    # (and, when given, passes the mask).\n",
    "    onehot = torch.stack(onehots, dim=-1)\n",
    "    mask = torch.any(torch.all(onehot.bool(), dim=-1), dim=-1)\n",
    "    return mask\n",
    "\n",
    "# Inline self-test for index_to_mask.\n",
    "# batch_dim1, batch_dim2, ..., batch_dimd, relation_dim, index_dim\n",
    "a = torch.tensor([[[1,2],[0,1]], [[0,2],[1,1]]])\n",
    "index_shape = [3, 4]\n",
    "# batch_dim1, batch_dim2, ..., batch_dimd, relation_dim\n",
    "index_mask = torch.tensor([[True, False], [True, True]])\n",
    "mask = index_to_mask(a, index_shape, index_mask)\n",
    "answer = torch.zeros([2, 3, 4])\n",
    "answer[0,1,2] = 1\n",
    "answer[1,0,2] = 1\n",
    "answer[1,1,1] = 1\n",
    "assert(torch.all(torch.eq(mask, answer.bool())) == True)\n",
    "\n",
    "# Without the mask, the previously masked-out relation [0, 1] lights up too.\n",
    "mask = index_to_mask(a, index_shape)\n",
    "answer[0,0,1] = 1\n",
    "assert(torch.all(torch.eq(mask, answer.bool())) == True)\n",
    "print(\"index to mask test passed.\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class FFNN(torch.nn.Module):\n",
    "    \"\"\"num_hidden_layers x (Linear, ReLU[, Dropout]) followed by an output Linear.\"\"\"\n",
    "    def __init__(self, input_size, num_hidden_layers, \n",
    "            hidden_size, output_size, dropout):\n",
    "        \"\"\"\n",
    "            :param input_size: input size\n",
    "            :param num_hidden_layers: count of hidden layers. 0 for MLP.\n",
    "            :param hidden_size: width of each hidden layer (ignored when\n",
    "                num_hidden_layers == 0)\n",
    "            :param output_size: output size of the final Linear layer\n",
    "            :param dropout: dropout rate applied after each hidden ReLU,\n",
    "                or None to build no Dropout layers at all\n",
    "        \"\"\"\n",
    "        super().__init__()\n",
    "        ffnn_layer_list = []        \n",
    "        current_input_size = input_size        \n",
    "        for i in range(num_hidden_layers):\n",
    "            ffnn_layer_list.append(torch.nn.Linear(current_input_size, hidden_size, bias=True))\n",
    "            ffnn_layer_list.append(torch.nn.ReLU())\n",
    "            if dropout is not None:\n",
    "                ffnn_layer_list.append(torch.nn.Dropout(dropout))\n",
    "            current_input_size = hidden_size\n",
    "        # Output layer always present, even with zero hidden layers.\n",
    "        ffnn_layer_list.append(torch.nn.Linear(current_input_size, output_size, bias=True))\n",
    "\n",
    "        self.ffnn = torch.nn.Sequential(*ffnn_layer_list)\n",
    "\n",
    "    def forward(self, x):\n",
    "        # Applies over the last dimension; leading batch dims are arbitrary.\n",
    "        return self.ffnn(x)\n",
    "    \n",
    "class Projection(FFNN):\n",
    "    \"\"\"A single Linear layer, i.e. an FFNN with no hidden layers.\"\"\"\n",
    "    def __init__(self, input_size, output_size):\n",
    "        super().__init__(\n",
    "            input_size=input_size,\n",
    "            num_hidden_layers=0,\n",
    "            hidden_size=-1, # unused: with num_hidden_layers=0 the hidden-layer loop never runs\n",
    "            output_size=output_size,\n",
    "            dropout=None\n",
    "        )\n",
    "        \n",
    "def test_ffnn():\n",
    "    \"\"\"Smoke test: FFNN and Projection construct and run on 2-D and 3-D inputs.\"\"\"\n",
    "    ffnn = FFNN(input_size=2, num_hidden_layers=1, hidden_size=10, output_size=2, dropout=0.5)\n",
    "    projection = Projection(2,2)\n",
    "    print(ffnn)\n",
    "    x = torch.randn([4,2])\n",
    "    out = ffnn(x)\n",
    "    # Also works with extra leading batch dims.\n",
    "    x = torch.randn([2,3,2])\n",
    "    out = ffnn(x)\n",
    "    print(\"ffnn test passed\")\n",
    "\n",
    "test_ffnn()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class MentionScorerLayer(torch.nn.Module):\n",
    "    \"\"\"Scores each candidate span embedding with a single-output FFNN.\"\"\"\n",
    "    def __init__(self, emb, args):\n",
    "        \"\"\"\n",
    "        :param emb: size of the span embedding (FFNN input size)\n",
    "        :param args: namespace providing ffnn_depth, ffnn_size and dropout\n",
    "        \"\"\"\n",
    "        super().__init__()\n",
    "        self.ffnn = FFNN(\n",
    "            input_size=emb, # input_size\n",
    "            num_hidden_layers=args.ffnn_depth, # num_hidden_layers\n",
    "            hidden_size=args.ffnn_size,  # hidden_size\n",
    "            output_size=1, # output_size\n",
    "            dropout=args.dropout)\n",
    "    \n",
    "    def forward(self, span_emb):\n",
    "        # span_emb - [batch_size, num_candidates, emb] -> scores [..., 1]\n",
    "        # TODO: implement width feature with nn.Embedding or something\n",
    "        return self.ffnn(span_emb)\n",
    "    \n",
    "def test_mention_scorer(args):\n",
    "    \"\"\"Smoke test: mention scorer runs on a [batch, candidates, emb] tensor.\"\"\"\n",
    "    mention_emb_size = 2 * args.hidden_size\n",
    "    mention_scorer = MentionScorerLayer(mention_emb_size, args)\n",
    "    print(mention_scorer)\n",
    "    # [batch_size, num_candidates, emb]\n",
    "    x = torch.randn([args.train_batch_size, args.max_num_candidates, mention_emb_size])\n",
    "    out = mention_scorer(x)\n",
    "    print(\"mention scorer test passed\")\n",
    "    \n",
    "test_mention_scorer(global_args)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class SlowScorerLayer(torch.nn.Module):\n",
    "    \"\"\"Pairwise (slow) antecedent scorer over concatenated pair features.\"\"\"\n",
    "    def __init__(self, emb, args):\n",
    "        \"\"\"\n",
    "        :param emb: size of the pair embedding (FFNN input size)\n",
    "        :param args: namespace providing ffnn_depth, ffnn_size and dropout\n",
    "        \"\"\"\n",
    "        super().__init__()\n",
    "        self.ffnn = FFNN(\n",
    "            input_size=emb, # input_size\n",
    "            num_hidden_layers=args.ffnn_depth, # num_hidden_layers\n",
    "            hidden_size=args.ffnn_size,  # hidden_size\n",
    "            output_size=1, # output_size\n",
    "            dropout=args.dropout)\n",
    "\n",
    "    def forward(self, top_span_emb, top_antecedents, top_antecedent_emb):\n",
    "        \"\"\"\n",
    "        top_span_emb: [b, k, emb]\n",
    "        top_antecedents: [b, k, c]\n",
    "        top_antecedent_emb: [b, k, c, emb]\n",
    "        Returns scores of shape [b, k, c].\n",
    "        \"\"\"\n",
    "        # TODO accept feature embedding\n",
    "\n",
    "        beam_size = top_antecedents.shape[-1]\n",
    "        # target_emb - [b, k, 1, emb]\n",
    "        target_emb = torch.unsqueeze(top_span_emb, -2)\n",
    "        # similarity_emb - [b, k, c, emb]: element-wise products as per-dim\n",
    "        # similarity features (a dot product would sum over emb; no sum here).\n",
    "        similarity_emb = top_antecedent_emb * target_emb\n",
    "        target_emb_shape = [-1,] * len(top_antecedent_emb.shape)\n",
    "        target_emb_shape[-2] = beam_size\n",
    "        # target_emb - [b, k, c, emb]\n",
    "        target_emb = target_emb.expand(target_emb_shape)\n",
    "        # pair_emb - [b, k, c, emb']\n",
    "        pair_emb = torch.cat([target_emb, top_antecedent_emb, similarity_emb], -1)\n",
    "        slow_antecedent_scores = self.ffnn(pair_emb).squeeze(-1)\n",
    "        return slow_antecedent_scores\n",
    "    \n",
    "def test_slow_scorer(args):\n",
    "    \"\"\"Smoke test: slow scorer runs on random span/antecedent embeddings.\"\"\"\n",
    "    b = args.train_batch_size\n",
    "    k = args.max_num_candidates\n",
    "    c = args.max_top_antecedents\n",
    "    mention_emb_size = 2 * args.hidden_size\n",
    "    mention_pair_emb_size = 3 * mention_emb_size\n",
    "    top_span_emb = torch.randn([b, k, mention_emb_size])\n",
    "    top_antecedents = torch.randint(low=0, high=k, size=[b, k, c])\n",
    "    top_antecedent_emb = torch.randn([b, k, c, mention_emb_size])\n",
    "    slow_scorer = SlowScorerLayer(mention_pair_emb_size, args)\n",
    "    print(slow_scorer)\n",
    "    out = slow_scorer(top_span_emb, top_antecedents, top_antecedent_emb)\n",
    "    print(\"slow scorer test passed\")\n",
    "    \n",
    "test_slow_scorer(global_args)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class FastScorerLayer(torch.nn.Module):\n",
    "    \"\"\"Coarse bilinear scorer: dropout(proj(source)) @ dropout(target)^T.\"\"\"\n",
    "    def __init__(self, emb, args):\n",
    "        super().__init__()\n",
    "        self.dropout = args.dropout\n",
    "        self.source_dropout = torch.nn.Dropout(self.dropout)\n",
    "        # The projection makes the product bilinear instead of symmetric.\n",
    "        self.sim_ffnn = Projection(emb, emb)\n",
    "        self.target_dropout = torch.nn.Dropout(self.dropout)\n",
    "\n",
    "    def forward(self, source_top_span_emb, target_top_span_emb): \n",
    "        \"\"\"\n",
    "        source/target_top_span_emb: actually the same when applied, of shape [batch_size, k, emb]\n",
    "        Returns a [batch_size, k, k] pairwise score matrix.\n",
    "        \"\"\"\n",
    "        # [batch_size, k, emb]\n",
    "        source_top_span_emb = self.source_dropout(self.sim_ffnn(source_top_span_emb))\n",
    "        # [batch_size, k, emb]\n",
    "        target_top_span_emb = self.target_dropout(target_top_span_emb)\n",
    "        # [batch_size, k, k]\n",
    "        return torch.matmul(source_top_span_emb,\n",
    "            target_top_span_emb.transpose(1, 2))\n",
    "    \n",
    "def test_fast_scorer(args):\n",
    "    \"\"\"Smoke test: fast scorer yields a [b, k, k] score matrix.\"\"\"\n",
    "    b = args.train_batch_size\n",
    "    k = args.max_num_candidates\n",
    "    mention_emb_size = 2 * args.hidden_size\n",
    "    top_span_emb = torch.randn([b, k, mention_emb_size])\n",
    "    fast_scorer = FastScorerLayer(mention_emb_size, args)\n",
    "    print(fast_scorer)\n",
    "    out = fast_scorer(top_span_emb, top_span_emb)\n",
    "    print(\"fast scorer test passed\")\n",
    "\n",
    "test_fast_scorer(global_args)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class RelationModel(torch.nn.Module):\n",
    "    \"\"\"BERT encoder plus span/antecedent scorers for relation prediction.\n",
    "\n",
    "    forward() returns [candidate_starts, candidate_ends, candidate_mask,\n",
    "    top_antecedents, top_antecedents_mask, top_antecedent_scores].\n",
    "    \"\"\"\n",
    "    def __init__(self, args):\n",
    "        super().__init__()\n",
    "        self.dropout = args.dropout\n",
    "        self.max_num_candidates = args.max_num_candidates\n",
    "        self.max_top_antecedents = args.max_top_antecedents\n",
    "        # self.bert_config = BertConfig.from_json_file(args.bert_config_file)\n",
    "        # self.encoder = BertModel(self.bert_config)\n",
    "        self.encoder = BertModel.from_pretrained(args.bert_model)\n",
    "        mention_emb_size = 2 * args.hidden_size # start + end BERT states\n",
    "        self.mention_scorer = MentionScorerLayer(mention_emb_size, args)\n",
    "        self.fast_scorer = FastScorerLayer(mention_emb_size, args)\n",
    "        mention_pair_emb_size = 3 * mention_emb_size # + feature_size but this is 0\n",
    "        self.slow_scorer = SlowScorerLayer(mention_pair_emb_size, args)\n",
    "\n",
    "    def get_parameters(self, args):\n",
    "        \"\"\"\n",
    "        customize hyper-parameters for parameters of layers\n",
    "        \"\"\"\n",
    "        return [\n",
    "            {'params': self.parameters(), 'lr': args.learning_rate },\n",
    "        ]\n",
    "\n",
    "    def extra_repr(self):\n",
    "        return \"Hi! I'm extra representation.\"\n",
    "\n",
    "    def get_span_emb(self, context_outputs, span_starts, span_ends):\n",
    "        \"\"\"Concatenate start- and end-token states into span embeddings.\"\"\"\n",
    "        # context_outputs - [batch_size, num_words, hidden_size]\n",
    "        # span_starts - [batch_size, num_candidates]\n",
    "        # span_ends - [batch_size, num_candidates]\n",
    "        span_emb_list = []\n",
    "        # expanded_span_starts/ends - [batch_size, num_candidates, hidden_size]\n",
    "        span_indices_shape = [-1, -1, context_outputs.shape[2]]\n",
    "        expanded_span_starts = span_starts.unsqueeze(2).expand(span_indices_shape)\n",
    "        expanded_span_ends = span_ends.unsqueeze(2).expand(span_indices_shape)\n",
    "        # span_start/end_emb - [batch_size, num_candidates, hidden_size]\n",
    "        span_start_emb = torch.gather(context_outputs, 1, expanded_span_starts)\n",
    "        span_end_emb = torch.gather(context_outputs, 1, expanded_span_ends)\n",
    "        span_emb_list.append(span_start_emb)\n",
    "        span_emb_list.append(span_end_emb)\n",
    "\n",
    "        # TODO implement embeddings for other features\n",
    "\n",
    "        # span_emb - [batch_size, num_candidates, hidden_size*2]\n",
    "        span_emb = torch.cat(span_emb_list, 2)\n",
    "        return span_emb\n",
    "\n",
    "    def get_mention_scores(self, span_emb, span_starts, span_ends):\n",
    "        # span_starts/ends currently unused; kept for future width features.\n",
    "        return self.mention_scorer(span_emb)\n",
    "\n",
    "    def get_slow_antecedent_scores(self, top_span_emb, \n",
    "            top_antecedents, top_antecedent_emb, feature_emb):\n",
    "        # feature_emb currently unused; kept for future feature embeddings.\n",
    "        return self.slow_scorer(top_span_emb, top_antecedents, top_antecedent_emb)\n",
    "\n",
    "    def get_fast_antecedent_scores(self, top_span_emb):\n",
    "        return self.fast_scorer(top_span_emb, top_span_emb)\n",
    "\n",
    "    def coarse_to_fine_pruning(self, top_span_emb, top_span_mention_scores, span_mask, c):\n",
    "        \"\"\"Keep the c best-scoring antecedent candidates for every span.\"\"\"\n",
    "        # antecedents_mask - [batch_size, num_candidates, num_candidates]\n",
    "        # A (span, antecedent) pair is valid only when BOTH spans are valid.\n",
    "        # BUGFIX: the previous `unsqueeze(2) + unsqueeze(1) >= 1` computed a\n",
    "        # logical OR, admitting pairs with one invalid member (cf. the\n",
    "        # scratch cell later in this notebook, which uses `*`, i.e. AND).\n",
    "        antecedents_mask = torch.logical_and(span_mask.unsqueeze(2), span_mask.unsqueeze(1))\n",
    "        # fast_antecedent_scores - [batch_size, num_candidates, num_candidates]\n",
    "        fast_antecedent_scores = top_span_mention_scores.unsqueeze(2) + \\\n",
    "                top_span_mention_scores.unsqueeze(1)\n",
    "        # log(0) = -inf pushes invalid pairs below every valid one in topk.\n",
    "        fast_antecedent_scores += torch.log(antecedents_mask.float())\n",
    "        fast_antecedent_scores += self.get_fast_antecedent_scores(top_span_emb)\n",
    "\n",
    "        # TODO: c > num_candidates?\n",
    "        # top_antecedents - [batch_size, num_candidates, c]\n",
    "        _, top_antecedents = torch.topk(fast_antecedent_scores, c, sorted=False)\n",
    "        top_antecedents_mask = torch.gather(antecedents_mask, 2, top_antecedents)\n",
    "        top_fast_antecedent_scores = torch.gather(fast_antecedent_scores, 2, top_antecedents)\n",
    "        return top_antecedents, top_antecedents_mask, top_fast_antecedent_scores\n",
    "\n",
    "    def forward(self, input_ids, \n",
    "            input_mask, \n",
    "            candidate_starts, \n",
    "            candidate_ends, \n",
    "            candidate_mask):\n",
    "        # input_ids - [batch_size, num_words] (num takes max for batched data)\n",
    "        # input_mask - [batch_size, num_words]\n",
    "        # candidate_starts - [batch_size, num_candidates]\n",
    "        # candidate_ends - [batch_size, num_candidates]\n",
    "        # candidate_mask - [batch_size, num_candidates]\n",
    "        # span_doc - [batch_size, num_words, hidden_size]\n",
    "        span_doc = self.encoder(\n",
    "            input_ids=input_ids,\n",
    "            attention_mask=input_mask\n",
    "        ).last_hidden_state\n",
    "        # candidate_span_emb - [batch_size, num_candidates, emb]\n",
    "        candidate_span_emb = self.get_span_emb(span_doc, \n",
    "            candidate_starts, candidate_ends)\n",
    "        # candidate_mention_scores - [batch_size, num_candidates, 1]\n",
    "        candidate_mention_scores = self.get_mention_scores(candidate_span_emb, \n",
    "            candidate_starts, candidate_ends)\n",
    "        # candidate_mention_scores - [batch_size, num_candidates]\n",
    "        candidate_mention_scores = torch.squeeze(candidate_mention_scores, 2)\n",
    "\n",
    "        # TODO: limit on max top antecedents?\n",
    "        beam_size = min(self.max_top_antecedents, self.max_num_candidates)\n",
    "\n",
    "        # No span pruning yet: every candidate is kept. The aliases keep the\n",
    "        # code shaped for a future pruning stage.\n",
    "        top_span_emb = candidate_span_emb\n",
    "        top_span_mention_scores = candidate_mention_scores\n",
    "\n",
    "        # dummy_scores - [batch_size, num_candidates, 1]: fixed score 0 for\n",
    "        # the \"no antecedent\" option.\n",
    "        dummy_scores = torch.zeros_like(top_span_mention_scores).unsqueeze(2)\n",
    "\n",
    "        top_antecedents,  \\\n",
    "        top_antecedents_mask, \\\n",
    "        top_fast_antecedent_scores = self.coarse_to_fine_pruning(\n",
    "            top_span_emb, top_span_mention_scores, candidate_mask, beam_size\n",
    "        )\n",
    "\n",
    "        # TODO: fine grained rescoring via self.slow_scorer?\n",
    "        # top_antecedent_scores - [batch_size, num_candidates, beam_size]\n",
    "        top_antecedent_scores = top_fast_antecedent_scores\n",
    "        # top_antecedent_scores - [batch_size, num_candidates, beam_size + 1]\n",
    "        top_antecedent_scores = torch.cat([dummy_scores, top_antecedent_scores], 2)\n",
    "\n",
    "        return [candidate_starts, candidate_ends, candidate_mask,\n",
    "            top_antecedents, top_antecedents_mask, top_antecedent_scores]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def softmax_loss(antecedent_scores, antecedent_labels):\n",
    "    \"\"\"Marginalized negative log-likelihood of the gold antecedents.\n",
    "\n",
    "    Both inputs are [batch_size, num_candidates, beam_size + 1]; labels are\n",
    "    boolean. Returns a [batch_size, num_candidates] per-candidate loss.\n",
    "    \"\"\"\n",
    "    # [batch_size, num_candidates, beam_size + 1]\n",
    "    # log(0) = -inf drops non-gold entries from the marginalization.\n",
    "    gold_scores = antecedent_scores + torch.log(antecedent_labels.float())\n",
    "    marginalized_gold_scores = torch.logsumexp(gold_scores, dim=2)\n",
    "    log_norm = torch.logsumexp(antecedent_scores, dim=2)\n",
    "    return log_norm - marginalized_gold_scores"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_loss(output, target, max_num_candidates):\n",
    "    \"\"\"Total marginalized log-likelihood loss for a batch.\n",
    "\n",
    "    :param output: list returned by RelationModel.forward().\n",
    "    :param target: (relations, relation_mask); relations has shape\n",
    "        [batch_size, max_relation_count, 2], relation_mask is bool over them.\n",
    "    :param max_num_candidates: number of candidate spans per example.\n",
    "    :return: scalar loss tensor.\n",
    "    \"\"\"\n",
    "    _, _, candidate_mask, top_antecedents, \\\n",
    "    top_antecedents_mask, top_antecedent_scores = output\n",
    "    relations, relation_mask = target\n",
    "    # relations - [batch_size, max_relation_count, 2]\n",
    "    # label[b][relation[b][r][0]][relation[b][r][1]] = 1\n",
    "    indicator_shape = [ max_num_candidates, max_num_candidates ]\n",
    "    # pairwise_labels - [batch_size, num_candidates, num_candidates]\n",
    "    pairwise_labels = index_to_mask(relations, indicator_shape, relation_mask)\n",
    "    # dummy_labels - [batch_size, num_candidates, 1]: the dummy antecedent\n",
    "    # is gold for valid candidates that appear in no relation.\n",
    "    dummy_labels = torch.logical_not(torch.any(pairwise_labels, 2, keepdim=True))\n",
    "    dummy_labels = torch.logical_and(dummy_labels, candidate_mask.unsqueeze(-1))\n",
    "\n",
    "    # pairwise_labels - [batch_size, num_candidates, beam_size]\n",
    "    pairwise_labels = torch.gather(pairwise_labels, 2, top_antecedents)\n",
    "    # top_antecedent_labels - [batch_size, num_candidates, beam_size + 1]\n",
    "    top_antecedent_labels = torch.cat([dummy_labels, pairwise_labels], 2)\n",
    "    # per-candidate loss - [batch_size, num_candidates]\n",
    "    loss = softmax_loss(top_antecedent_scores, top_antecedent_labels)\n",
    "    # Single reduction to a scalar (a redundant second torch.sum and two\n",
    "    # leftover debug prints were removed).\n",
    "    return torch.sum(loss)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def test_relation_model(args):\n",
    "    \"\"\"End-to-end smoke test: forward pass, loss computation, and backward.\"\"\"\n",
    "    # input_ids - [batch_size, num_words] (num takes max for batched data)\n",
    "    # input_mask - [batch_size, num_words]\n",
    "    # candidate_starts - [batch_size, num_candidates]\n",
    "    # candidate_ends - [batch_size, num_candidates]\n",
    "    # candidate_mask - [batch_size, num_candidates]\n",
    "    # relations - [batch_size, max_relation_count, 2]\n",
    "    # relations_mask - [batch_size, max_relation_count]\n",
    "    b = args.train_batch_size\n",
    "    num_words = 128\n",
    "    # k = args.max_num_candidates\n",
    "    # NOTE(review): k must equal len(candidates); range(1, 101, 5) has 20 items.\n",
    "    k = 20\n",
    "    id_low = 100\n",
    "    id_high = 1000 \n",
    "    input_ids = torch.randint(low=id_low, high=id_high, size=[b, num_words])\n",
    "    input_mask = torch.ones([b, num_words]).bool()\n",
    "    candidates = [ [x, x+3] for x in range(1, 101, 5)] # one batch\n",
    "    candidates = [ candidates, ] * b\n",
    "    candidate_intervals = torch.tensor(candidates)\n",
    "    candidate_starts, candidate_ends = torch.split(candidate_intervals, 1, dim=-1)\n",
    "    candidate_starts = candidate_starts.squeeze(-1)\n",
    "    candidate_ends = candidate_ends.squeeze(-1)\n",
    "    # candidate_mask = torch.ones_like(candidate_starts).bool()\n",
    "    candidate_mask = torch.ones([b, k]).bool()\n",
    "    model = RelationModel(args)\n",
    "    # print(model)\n",
    "    out = model(input_ids, input_mask, candidate_starts, candidate_ends, candidate_mask)\n",
    "    print(\"relation model test passed.\")\n",
    "    \n",
    "    relations = [ [x, x+3] for x in range(10)]\n",
    "    relations = [ relations, ] * b\n",
    "    relations = torch.tensor(relations)\n",
    "    relations_mask = torch.ones([b, 10]).bool()\n",
    "    target = (relations, relations_mask)\n",
    "    loss = get_loss(out, target, k)\n",
    "    loss.backward()\n",
    "    \n",
    "test_relation_model(global_args)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Tokenizer sanity check: encode, then round-trip ids -> tokens -> ids.\n",
    "hysteria = \"a psychoneurosis marked by emotional excitability and disturbances of \" \\\n",
    "    \"the psychogenic, sensory, vasomotor, and visceral functions\"\n",
    "global_tokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\")\n",
    "tokenized = global_tokenizer(hysteria)[\"input_ids\"]\n",
    "raw = global_tokenizer.convert_ids_to_tokens(tokenized)\n",
    "ids = global_tokenizer.convert_tokens_to_ids(raw)\n",
    "print(raw)\n",
    "print(ids)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scratch: pairwise mask semantics. Note pair validity here is the\n",
    "# PRODUCT (logical AND) of the two 1-D masks.\n",
    "mask = [[1,1,0,0], [1,0,0,0], [1,1,1,0]]\n",
    "mask = torch.tensor(mask, dtype=torch.float32)\n",
    "print(torch.logsumexp(mask, dim=-1))\n",
    "mask_2d = mask.unsqueeze(2) * mask.unsqueeze(1)\n",
    "print(torch.logical_not(torch.any(mask_2d.bool(), 2, keepdim=True)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([4., 0., 2., 0., 3.])\n",
      "torch.Size([0])\n",
      "2.5\n"
     ]
    }
   ],
   "source": [
    "# Scratch: masked_scatter / masked_select semantics and np.mean on a list.\n",
    "a = torch.tensor([4,2,3]).float()\n",
    "b = torch.tensor([1,0,1,0,1]).bool()\n",
    "c = torch.zeros([5])\n",
    "c = torch.masked_scatter(c, b, a)\n",
    "print(c)\n",
    "a = torch.tensor([1,2,3])\n",
    "b = torch.tensor([0, 0, 0]).bool()\n",
    "c = torch.masked_select(a, b)\n",
    "print(c.shape)\n",
    "d = [1,2,3,4]\n",
    "print(np.mean(d))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Evaluator:\n",
    "    def __init__(self, f_beta=1):\n",
    "        self.beta = f_beta\n",
    "        pass\n",
    "\n",
    "    def f_measure(self, precision, recall):\n",
    "        if precision == 0 or recall == 0:\n",
    "            return 0\n",
    "        return (1+self.beta ** 2)*(precision*recall) / ((self.beta ** 2)*precision + recall)\n",
    "    \n",
    "    def __call__(self, top_antecedent_scores, top_antecedent_labels, candidate_mask):\n",
    "        return self.evaluate(top_antecedent_scores, top_antecedent_labels, candidate_mask)\n",
    "\n",
    "    def evaluate(self, top_antecedent_scores, top_antecedent_labels, candidate_mask):\n",
    "        # top_antecedent_scores - [batch_size, num_candidates, beam_size + 1]\n",
    "        # top_antecedent_labels - [batch_size, num_candidates, beam_size + 1]\n",
    "        # candidate_mask - [batch_size, num_candidates]\n",
    "        batch_size = top_antecedent_labels.shape[0]\n",
    "        max_num_candidates = top_antecedent_labels.shape[1]\n",
    "        max_top_antecedents = top_antecedent_labels.shape[2]-1\n",
    "        # rel_num_per_cand - [batch_size, num_candidates]\n",
    "        rel_num_per_cand = torch.sum(top_antecedent_labels, dim=-1)\n",
    "        \n",
    "        batch_precisions = []\n",
    "        batch_recalls = []\n",
    "        batch_accuracies = []\n",
    "        for i in range(batch_size):\n",
    "            precision_pos, precision_total = 0, 0\n",
    "            recall_pos, recall_total = 0, 0\n",
    "            for j in range(max_num_candidates):\n",
    "                # iterate on all valid candidates\n",
    "                if not candidate_mask[i][j]:\n",
    "                    continue\n",
    "                # topk is performed on the last dim by default\n",
    "                # (values, indices) is returned but score value is unused\n",
    "                # TODO do we need sorting?\n",
    "                \n",
    "                # antecedent_indices = torch.arange(0, max_num_candidates)\n",
    "                antecedent_indices = torch.arange(-1, max_top_antecedents)\n",
    "\n",
    "                # predicted_antecedents - vector, indices of all predicted antecedents\n",
    "                valid_antecedent_mask = top_antecedent_scores[i, j] > 0\n",
    "                predicted_antecedents = torch.masked_select(antecedent_indices, valid_antecedent_mask).tolist()\n",
    "                predicted_antecedents = set(predicted_antecedents)\n",
    "                # if model predicts a dummy antecedent\n",
    "                if len(predicted_antecedents) == 0:\n",
    "                    predicted_antecedents.add(-1)\n",
    "                \n",
    "                # evaluated_antecedents - vector, indices of top predicted antecedents, \n",
    "                # expected to have the same number as that of relations involving current candidate, that is, rel_num_per_cand.\n",
    "                # but when the number of predicted antecedents are less than relation count,\n",
    "                # the evaluated antecedents falls to predicted antecedents.\n",
    "                evaluated_scores, evaluated_antecedents = torch.topk(top_antecedent_scores[i, j], rel_num_per_cand[i, j], sorted=True)\n",
    "                valid_evaluated_mask = evaluated_scores > 0\n",
    "                evaluated_antecedents = torch.masked_select(evaluated_antecedents, valid_evaluated_mask).tolist()\n",
    "                evaluated_antecedents = set(evaluated_antecedents)\n",
    "                # if model predicts a dummy antecedent\n",
    "                if len(predicted_antecedents) == 0:\n",
    "                    evaluated_antecedents.add(-1)\n",
    "\n",
    "                # gold_antecedents. If gold label is the dummy antecedent, the gold antecedents contains -1 only.\n",
    "                gold_antecedents = torch.masked_select(antecedent_indices, top_antecedent_labels[i, j]).tolist()\n",
    "                gold_antecedents = set(gold_antecedents)\n",
    "\n",
    "                # two ways of determining positive antecedents.\n",
    "                # positive predicted antecedents\n",
    "                pos_pred_ant = predicted_antecedents & gold_antecedents\n",
    "                # positive evaluated antecedents\n",
    "                pos_eval_ant = evaluated_antecedents & gold_antecedents\n",
    "                # Only one of them shall be used.\n",
    "                pos_ant = pos_pred_ant\n",
    "\n",
    "                precision_pos += len(pos_ant)\n",
    "                precision_total += len(predicted_antecedents)\n",
    "                recall_pos += len(pos_ant)\n",
    "                recall_total += len(gold_antecedents)\n",
    "                print(len(pos_ant), len(predicted_antecedents), len(gold_antecedents))\n",
    "            \n",
    "            precision = precision_pos / precision_total\n",
    "            recall = recall_pos / recall_total\n",
    "            accuracy = self.f_measure(precision, recall)\n",
    "            batch_precisions.append(precision)\n",
    "            batch_recalls.append(recall)\n",
    "            batch_accuracies.append(accuracy)\n",
    "        \n",
    "        batch_precision = np.mean(batch_precisions)\n",
    "        batch_recall = np.mean(batch_recalls)\n",
    "        batch_accuracy = np.mean(batch_accuracies)\n",
    "        return batch_precision, batch_recall, batch_accuracy"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2 3 2\n",
      "1 2 2\n",
      "0 1 1\n",
      "(0.5, 0.6, 0.5454545454545454)\n"
     ]
    }
   ],
   "source": [
    "top_antecedent_scores = [[[0,1,2,3,0], [0,4,-1,5, 0], [0,1,-1,-1, 0],[0,-1,-2,-3, 0]]]\n",
    "top_antecedent_labels = [[[0,0,1,1, 0], [0,1,1,0, 0],  [0,0,1,0, 0],  [1,0,0,0, 0]]]\n",
    "candidate_mask = [[1,1,1,0]]\n",
    "scores = torch.tensor(top_antecedent_scores).float()\n",
    "labels = torch.tensor(top_antecedent_labels).bool()\n",
    "mask = torch.tensor(candidate_mask).bool()\n",
    "evaluator = Evaluator()\n",
    "result = evaluator.evaluate(scores, labels, mask)\n",
    "print(result)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [],
   "source": [
    "a = torch.tensor(1)\n",
    "b = float(a)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['model-step-125', 'model-step-01345', 'model-step-012345']\n"
     ]
    }
   ],
   "source": [
    "strs = [\"model-step-01345\", \"model-step-012345\", \"model-step-125\"]\n",
    "a = \"model-step-012345\"\n",
    "pattern = re.compile(\"^model-step-0*([1-9][0-9]*)$\")\n",
    "sort_dirs = lambda x: eval(pattern.search(x).group(1))\n",
    "strs.sort(key=sort_dirs)\n",
    "print(strs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
