{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "712bb927",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['cls.seq_relationship.weight', 'cls.seq_relationship.bias']\n",
      "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
     ]
    }
   ],
   "source": [
     "import os\n",
     "\n",
     "from transformers import BertTokenizer, BertForMaskedLM\n",
     "import torch\n",
     "import torch.nn.functional as F\n",
     "\n",
     "\n",
     "# NOTE(review): machine-specific absolute paths -- adjust per environment.\n",
     "WIKI_DIR = \"/data/disk5/private/yuc/coref/wikipedia/text\"\n",
     "DUMP_DIR = \"/data/disk5/private/yuc/coref/bert-tagger/playground/dump_kl\"\n",
     "\n",
     "# Shared tokenizer and masked-LM model, used as defaults by helpers below.\n",
     "global_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n",
     "global_model = BertForMaskedLM.from_pretrained('bert-base-uncased')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 59,
   "id": "ebb6f736",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Global verbosity switch: when True, helpers print intermediate tensors.\n",
     "DEBUG = False"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "id": "90e6cb52",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_indices_of_id(tokens, token_id):\n",
    "    ret = tokens.eq(token_id).nonzero(as_tuple=True)\n",
    "    if DEBUG:\n",
    "        print(\"[GET_INDICES_OF_ID]: \", ret[0])\n",
    "    return ret[0]\n",
    "\n",
    "def get_logits_of_index(outputs, index, tokenizer=global_tokenizer):\n",
    "    logits = outputs.logits\n",
    "    logits = torch.squeeze(logits)[index]\n",
    "    return logits\n",
    "\n",
    "def get_prediction_from_probs(probs, tokenizer=global_tokenizer):\n",
    "    print(probs.shape)\n",
    "    pred_id = torch.argmax(probs)\n",
    "    pred_prob = probs[pred_id]\n",
    "    pred_token = tokenizer.convert_ids_to_tokens([pred_id,])[0]\n",
    "    return (pred_id, pred_prob, pred_token)\n",
    "\n",
    "def get_prediction_from_logits(logits, tokenizer=global_tokenizer):\n",
    "    print(logits.shape)\n",
    "    probs = F.softmax(torch.squeeze(logits), dim=0)\n",
    "    return get_prediction_from_probs(probs, tokenizer) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "id": "82a97371",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([1, 3])\n"
     ]
    }
   ],
   "source": [
    "a = torch.tensor([1,2,3,2], dtype=torch.float32)\n",
    "id = 2\n",
    "print(get_indices_of_id(a, id))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "id": "6cc785e5",
   "metadata": {},
   "outputs": [],
   "source": [
    "def index_only_dist(result, target, index):\n",
    "    return F.relu(target[index] - result[index])\n",
    "\n",
    "def kl_divergence_dist(result, target, index):\n",
    "    return torch.mean(torch.sum(F.softmax(target, dim=0) * ( - F.log_softmax(result, dim=0) + F.log_softmax(target, dim=0))))\n",
    "\n",
    "def cross_entropy_dist(result, target, index):\n",
    "    return torch.mean(torch.sum(- F.softmax(target, dim=0) * F.log_softmax(result, dim=0)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 63,
   "id": "4d6e225d",
   "metadata": {},
   "outputs": [],
   "source": [
    "def inv_softmax(tensor, dim=1):\n",
    "    res = torch.clone(tensor)\n",
    "    base = torch.amin(res, dim=dim)\n",
    "    base = base.unsqueeze(dim)\n",
    "    res = torch.log(res / base)\n",
    "    return res"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "id": "6ed91c55",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([-0.6931, -0.6931])\n",
      "tensor(0.)\n",
      "tensor(1.4013)\n",
      "tensor(1.3357)\n",
      "tensor(2.0142)\n"
     ]
    }
   ],
   "source": [
     "# Quick numerical check of the distance functions against hand-computed values.\n",
     "# notice that p1 and p2 are logits\n",
     "p1 = torch.tensor([0.10, 0.40, 0.50])\n",
     "p1 = inv_softmax(p1, dim=0)\n",
     "p2 = torch.tensor([0.80, 0.15, 0.05])\n",
     "p2 = inv_softmax(p2, dim=0)\n",
     "p3 = 1\n",
     "print(F.log_softmax(torch.tensor([1.,1.]), dim=0)) # torch log softmax is based on e, and ln(2) == 0.693\n",
     "print(index_only_dist(p1, p2, p3))\n",
     "print(kl_divergence_dist(p1, p2, p3)) # should be about 1.401\n",
     "print(kl_divergence_dist(p2, p1, p3)) # should be about 1.336\n",
     "print(cross_entropy_dist(p1, p2, p3)) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 65,
   "id": "97b31c99",
   "metadata": {},
   "outputs": [],
   "source": [
    "def wiki_text_file_iterator():\n",
    "    for root, dirs, files in os.walk(WIKI_DIR):\n",
    "        for file_name in files:\n",
    "            file_path = os.path.join(root, file_name)\n",
    "            yield file_path   \n",
    "\n",
    "def sentence_iterator(file_path):\n",
    "    print(file_path)\n",
    "    with open(file_path) as fs:\n",
    "        for line in fs.readlines():\n",
    "            if line[0] == '<':\n",
    "                continue\n",
    "            tokens = line.strip().split()\n",
    "            if len(tokens) >= 30: # ignore doc that is too long\n",
    "                continue\n",
    "            if len(tokens) <= 5: # ignore invalid lines and short sentences\n",
    "                continue\n",
    "            yield tokens"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 66,
   "id": "21fe8015",
   "metadata": {},
   "outputs": [],
   "source": [
    "def question_pair_generator(sentence, masked_placeholder=\"[MASK]\", missing_placeholder=\"[MASK]\"):\n",
    "    for missing_index, missing_token in enumerate(sentence):\n",
    "        for masked_index, masked_token in enumerate(sentence):\n",
    "            if missing_index == masked_index:\n",
    "                continue\n",
    "            unmasked_question = list(sentence)\n",
    "            unmasked_question[missing_index] = missing_placeholder\n",
    "            masked_question = list(unmasked_question)\n",
    "            masked_question[masked_index] = masked_placeholder                    \n",
    "            # context = \" \".join(sentence)\n",
    "            # unmasked_question = \" \".join(unmasked_question)\n",
    "            # masked_question = \" \".join(masked_question)\n",
    "            # answer = sentence[missing_index]\n",
    "            # yield ((context, unmasked_question, masked_question), missing_index, masked_index, answer)\n",
    "            context = sentence\n",
    "            yield ((context, unmasked_question, masked_question), missing_index, masked_index)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 67,
   "id": "1b3f48a6",
   "metadata": {},
   "outputs": [],
   "source": [
    "def question_pair_consumer(\n",
    "        question_pair, \n",
    "        text_missing_index, \n",
    "        text_masked_index,\n",
    "        # answer,\n",
    "        missing_placeholder=\"[MASK]\",\n",
    "        masked_placeholder=\"[MASK]\",\n",
    "        tokenizer=global_tokenizer,\n",
    "        model=global_model,\n",
    "        measure=cross_entropy_dist):\n",
    "    get_id = lambda x: tokenizer.convert_tokens_to_ids([x, ])[0]\n",
    "    missing_id = get_id(missing_placeholder)\n",
    "    masked_id = get_id(masked_placeholder)\n",
    "    get_token = lambda x: tokenizer.convert_ids_to_tokens([x, ])[0]\n",
    "    \n",
    "    def consume_question(question, context, masked=False):\n",
    "        inputs = tokenizer(question, return_tensors=\"pt\")\n",
    "        labels = tokenizer(context, return_tensors=\"pt\")[\"input_ids\"]\n",
    "        if inputs[\"input_ids\"].shape != labels.shape:\n",
    "            # print(\"Error: inputs and labels are not aligned.\")\n",
    "            return None\n",
    "        try: \n",
    "            outputs = model(**inputs, labels=labels)\n",
    "        except ValueError as e:\n",
    "            print(\"ValueError while inferring\")\n",
    "            print(question, context, inputs[\"input_ids\"].shape, labels.shape)\n",
    "            exit()\n",
    "\n",
    "        inputs = inputs[\"input_ids\"].squeeze()\n",
    "        labels = labels.squeeze()\n",
    "        missing_index, masked_index = -1, -1\n",
    "        if masked and missing_id == masked_id:\n",
    "            indices = get_indices_of_id(inputs, masked_id)\n",
    "            if len(indices) != 2:\n",
    "                print(\"Error: cannot determine missing and masked tokens.\")\n",
    "                return None\n",
    "            missing_index, masked_index = indices if text_missing_index < text_masked_index else (indices[1], indices[0])\n",
    "        else:\n",
    "            get_index = lambda id: get_indices_of_id(inputs, id)[0]\n",
    "            missing_index = get_index(missing_id)\n",
    "            if masked:\n",
    "                masked_index = get_index(masked_id)\n",
    "        \n",
    "        answer_id = labels[missing_index]\n",
    "        if DEBUG:\n",
    "            print(\"inputs: \", inputs, \"labels: \", labels)\n",
    "            print(\"logits: \", outputs.logits)\n",
    "            print(\"placeholder ids: \", missing_id, masked_id)\n",
    "            print(\"missing label index: \", missing_index)\n",
    "        logits = get_logits_of_index(outputs, missing_index)\n",
    "        if DEBUG:\n",
    "            print(\"logits: \", logits)\n",
    "        # probs = F.softmax(torch.squeeze(logits), dim=0)\n",
    "        return logits, answer_id\n",
    "\n",
    "    context, u_question, m_question = list(map(lambda x: \" \".join(x), question_pair))\n",
    "    if DEBUG:\n",
    "        print(\"==========START consuming question==========\")\n",
    "        print(\"context: \", context, \"unmasked: \", u_question, \"masked: \", m_question)\n",
    "    u_pred, answer_id = consume_question(u_question, context, masked=False)\n",
    "    m_pred, answer_id = consume_question(m_question, context, masked=True)\n",
    "    if DEBUG:\n",
    "        print(\"==========END consuming question==========\")\n",
    "    if u_pred == None or m_pred == None:\n",
    "        return None\n",
    "\n",
    "    return measure(m_pred, u_pred, answer_id)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 68,
   "id": "c351b20f",
   "metadata": {},
   "outputs": [],
   "source": [
    "def iterate_on_sentence(sentence, measure=kl_divergence_dist):\n",
    "    sentence = sentence.strip().split()\n",
    "    relation_list = []\n",
    "    counter = -1\n",
    "    for question_pair_info in question_pair_generator(sentence):\n",
    "        # print(question_pair_info)\n",
    "        question_pair, missing_index, masked_index = question_pair_info\n",
    "        context = question_pair[0]\n",
    "        missing_token = context[missing_index]\n",
    "        masked_token = context[masked_index]\n",
    "        distance = question_pair_consumer(*question_pair_info, measure=measure)\n",
    "        if distance != None:\n",
    "            relation = {\n",
    "                \"missing_index\": missing_index,\n",
    "                \"masked_index\": masked_index,\n",
    "                \"distance\": float(distance)\n",
    "            }\n",
    "            relation_list.append(relation)\n",
    "        counter -= 1\n",
    "        if counter == 0:\n",
    "            break\n",
    "        \n",
    "    return relation_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 69,
   "id": "e7e3490c",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Sample sentences of varying length and domain for exercising the pipeline.\n",
     "table = \"This table can be completed with the marginal totals of the two variables\"\n",
     "short = \"cochran – armitage test for trend\"\n",
     "hamlet = \"To be or not to be, that is the question.\"\n",
     "hysteria = \"a psychoneurosis marked by emotional excitability and disturbances of \" \\\n",
     "    \"the psychogenic, sensory, vasomotor, and visceral functions\"\n",
     "village = \"The statistical area Berkhout which also can include the peripheral \" \\\n",
     "    \"parts of the village as well as the surrounding countryside has a population of around 1780.\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 70,
   "id": "4a8cb180",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[{'missing_index': 2, 'masked_index': 3, 'distance': 6.851987838745117}, {'missing_index': 1, 'masked_index': 2, 'distance': 5.872376441955566}, {'missing_index': 11, 'masked_index': 12, 'distance': 5.051525115966797}, {'missing_index': 3, 'masked_index': 2, 'distance': 4.813048362731934}, {'missing_index': 7, 'masked_index': 8, 'distance': 2.3268322944641113}]\n"
     ]
    }
   ],
   "source": [
     "# Rank token pairs of `table` by KL distance and show the 5 strongest.\n",
     "relations = iterate_on_sentence(table)\n",
     "relations.sort(key=lambda x:x[\"distance\"], reverse=True)\n",
     "print(relations[:5])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 58,
   "id": "37683046",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.9  (conda)",
   "language": "python",
   "name": "pythonjvsc74a57bd0ed789356564aca5994f801776aa8fac70745a610a01bf29c141cd7ff6dd8c909"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
