{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import csv\n",
    "import logging\n",
    "import os\n",
    "import random\n",
    "import sys\n",
    "\n",
    "import numpy as np\n",
    "import torch\n",
    "from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,\n",
    "                              TensorDataset)\n",
    "from torch.utils.data.distributed import DistributedSampler\n",
    "from tqdm import tqdm, trange\n",
    "\n",
    "from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE\n",
    "from pytorch_pretrained_bert.modeling import BertForSequenceClassification, BertConfig, WEIGHTS_NAME, CONFIG_NAME\n",
    "#from pytorch_pretrained_bert.tokenization import BertTokenizer\n",
    "from pytorch_pretrained_bert.optimization import BertAdam, warmup_linear\n",
    "\n",
    "from bertviz.bertviz import attention, visualization\n",
    "from bertviz.bertviz.pytorch_pretrained_bert import BertModel, BertTokenizer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Runtime setup: module logger, classifier checkpoint location, compute device.\n",
     "logger = logging.getLogger(__name__)\n",
     "bert_classifier_model_dir = \"./bert_classifier/\" ## Directory holding the fine-tuned BERT classifier checkpoint\n",
     "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
     "n_gpu = torch.cuda.device_count()\n",
     "# NOTE(review): no logging handler/basicConfig is set up in the visible cells,\n",
     "# so this info line may not actually be displayed - confirm.\n",
     "logger.info(\"device: {}, n_gpu {}\".format(device, n_gpu))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# file paths (raw input splits, one sentence per line)\n",
     "# NOTE(review): hardcoded absolute path - adjust data_dir for your environment.\n",
     "data_dir = \"/home/ubuntu/bhargav/data/\"\n",
     "dataset = \"yelp\" # amazon / yelp / imagecaption\n",
     "train_0 = os.path.join(data_dir ,\"./{}/sentiment_train_0.txt\".format(dataset))\n",
     "train_1 = os.path.join(data_dir,\"./{}/sentiment_train_1.txt\".format(dataset))\n",
     "test_0 = os.path.join(data_dir,\"./{}/sentiment_test_0.txt\".format(dataset))\n",
     "test_1 = os.path.join(data_dir,\"./{}/sentiment_test_1.txt\".format(dataset))\n",
     "dev_0 = os.path.join(data_dir,\"./{}/sentiment_dev_0.txt\".format(dataset))\n",
     "dev_1 = os.path.join(data_dir,\"./{}/sentiment_dev_1.txt\".format(dataset))\n",
     "reference_0 = os.path.join(data_dir,\"./{}/reference_0.txt\".format(dataset))\n",
     "reference_1 = os.path.join(data_dir,\"./{}/reference_1.txt\".format(dataset))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# file paths (processed outputs; mirrors the raw-path cell but targets the\n",
     "# processed_files_with_bert_with_best_head directory)\n",
     "data_dir = \"/home/ubuntu/bhargav/data/\"\n",
     "dataset = \"yelp\" # amazon / yelp / imagecaption\n",
     "train_0_out = os.path.join(data_dir ,\"./{}/processed_files_with_bert_with_best_head/sentiment_train_0.txt\".format(dataset))\n",
     "train_1_out = os.path.join(data_dir,\"./{}/processed_files_with_bert_with_best_head/sentiment_train_1.txt\".format(dataset))\n",
     "test_0_out = os.path.join(data_dir,\"./{}/processed_files_with_bert_with_best_head/sentiment_test_0.txt\".format(dataset))\n",
     "test_1_out = os.path.join(data_dir,\"./{}/processed_files_with_bert_with_best_head/sentiment_test_1.txt\".format(dataset))\n",
     "dev_0_out = os.path.join(data_dir,\"./{}/processed_files_with_bert_with_best_head/sentiment_dev_0.txt\".format(dataset))\n",
     "dev_1_out = os.path.join(data_dir,\"./{}/processed_files_with_bert_with_best_head/sentiment_dev_1.txt\".format(dataset))\n",
     "reference_0_out = os.path.join(data_dir,\"./{}/processed_files_with_bert_with_best_head/reference_0.txt\".format(dataset))\n",
     "reference_1_out = os.path.join(data_dir,\"./{}/processed_files_with_bert_with_best_head/reference_1.txt\".format(dataset))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "## Model for performing Classification\n",
     "# Fine-tuned binary sentiment classifier; eval() disables dropout for inference.\n",
     "model_cls = BertForSequenceClassification.from_pretrained(bert_classifier_model_dir, num_labels=2)\n",
     "tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)\n",
     "model_cls.to(device)\n",
     "model_cls.eval()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "## Model to get the attention weights of all the heads\n",
     "# The bertviz BertModel also returns attention tensors, consumed later as\n",
     "# attn[layer]['attn_probs'] in run_attn_examples.\n",
     "model = BertModel.from_pretrained(bert_classifier_model_dir)\n",
     "# NOTE(review): re-binds tokenizer with the same settings as the classifier cell - redundant but harmless.\n",
     "tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)\n",
     "model.to(device)\n",
     "model.eval()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "max_seq_len=70 # Maximum sequence length (including [CLS] and [SEP])\n",
     "sm = torch.nn.Softmax(dim=-1) ## Softmax over the last dimension; NOTE(review): appears unused in the visible cells"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Function words and sentence punctuation that must never be deleted when\n",
     "# stripping style tokens; stored as vocabulary ids for membership tests in prepare_data.\n",
     "common_words=['is','are','was','were','has','have','had','a','an','the','this','that','these','those','there','how','i','we',\n",
     "             'he','she','it','they','them','their','his','him','her','us','our', 'and','in','my','your','you', 'will', 'shall']\n",
     "common_words_tokens = tokenizer.convert_tokens_to_ids(common_words)  # ids, despite the _tokens name\n",
     "not_to_remove_ids = tokenizer.convert_tokens_to_ids([\"[CLS]\",\"[SEP]\", \".\", \"?\", \"!\"])\n",
     "not_to_remove_ids += common_words_tokens"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def read_file(file_path):\n",
    "    with open(file_path) as fp:\n",
    "        data = fp.read().splitlines()\n",
    "    return data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def create_output_file(original_sentences,processed_sentences, output_file, sentiment=\"<POS>\"):\n",
    "    with open(output_file,\"w\") as fp:\n",
    "        for sen1,sen2 in zip(original_sentences,processed_sentences):\n",
    "            if sen1 != None and sen2 != None:\n",
    "                str1 = sentiment + \" <CON_START> \" + sen2 + \" <START> \" + sen1 + \" <END>\\n\"\n",
    "                fp.write(str1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def create_ref_output_file(processed_sentences, output_file, sentiment=\"<POS>\"):\n",
    "    with open(output_file,\"w\") as fp:\n",
    "        for sen in tqdm(processed_sentences):\n",
    "            if sen != None:\n",
    "                str1 = sentiment + \" <CON_START> \" + sen + \" <START>\\n\"\n",
    "                fp.write(str1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def concate_files(inp_files, out_files):\n",
    "    with open(out_files,\"w\") as fp:\n",
    "        for file in inp_files:\n",
    "            with open(file) as f:\n",
    "                for line in f:\n",
    "                    fp.write(line)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def run_attn_examples(input_sentences, layer, head, bs=128):\n",
    "    \"\"\"\n",
    "    Returns Attention weights for selected Layer and Head along with ids and tokens\n",
    "    of the input_sentence\n",
    "    \"\"\"\n",
    "    ids = []\n",
    "    ids_to_decode = [None for k in range(len(input_sentences))]\n",
    "    tokens_to_decode = [None for k in range(len(input_sentences))]\n",
    "    segment_ids = []\n",
    "    input_masks = []\n",
    "    attention_weights = [None for z in input_sentences]\n",
    "    ## BERT pre-processing\n",
    "    for j,sen in enumerate(tqdm(input_sentences)):\n",
    "        \n",
    "        text_tokens = tokenizer.tokenize(sen)\n",
    "        if len(text_tokens) >= max_seq_len-2:\n",
    "            text_tokens = text_tokens[:max_seq_len-4]\n",
    "        tokens = [\"[CLS]\"] + text_tokens + [\"[SEP]\"]\n",
    "        tokens_to_decode[j] = tokens\n",
    "        temp_ids = tokenizer.convert_tokens_to_ids(tokens)\n",
    "        ids_to_decode[j] = temp_ids\n",
    "        input_mask = [1] * len(temp_ids)\n",
    "        segment_id = [0] * len(temp_ids)\n",
    "        padding = [0] * (max_seq_len - len(temp_ids))\n",
    "        \n",
    "        \n",
    "        temp_ids += padding\n",
    "        input_mask += padding\n",
    "        segment_id += padding\n",
    "        \n",
    "        ids.append(temp_ids)\n",
    "        input_masks.append(input_mask)\n",
    "        segment_ids.append(segment_id)\n",
    "    \n",
    "    # Convert Ids to Torch Tensors\n",
    "    ids = torch.tensor(ids) \n",
    "    segment_ids = torch.tensor(segment_ids)\n",
    "    input_masks = torch.tensor(input_masks)\n",
    "    \n",
    "    steps = len(ids) // bs\n",
    "    \n",
    "    for i in trange(steps+1):\n",
    "        if i == steps:\n",
    "            temp_ids = ids[i * bs : len(ids)]\n",
    "            temp_segment_ids = segment_ids[i * bs: len(ids)]\n",
    "            temp_input_masks = input_masks[i * bs: len(ids)]\n",
    "        else:\n",
    "            temp_ids = ids[i * bs : i * bs + bs]\n",
    "            temp_segment_ids = segment_ids[i * bs: i * bs + bs]\n",
    "            temp_input_masks = input_masks[i * bs: i * bs + bs]\n",
    "        \n",
    "        temp_ids = temp_ids.to(device)\n",
    "        temp_segment_ids = temp_segment_ids.to(device)\n",
    "        temp_input_masks = temp_input_masks.to(device)\n",
    "        with torch.no_grad():\n",
    "             _, _, attn = model(temp_ids, temp_segment_ids, temp_input_masks)\n",
    "        # Concate Attention weights\n",
    "        for j in range(len(attn[layer]['attn_probs'])):\n",
    "            attention_weights[i * bs + j] = (attn[layer]['attn_probs'][j][head][0]).to('cpu')\n",
    "    \n",
    "    return attention_weights, ids_to_decode, tokens_to_decode"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "def prepare_data(aw, ids_to_decode, tokens_to_decode):\n",
     "    \"\"\"For each sentence, delete its most-attended (style-bearing) tokens and\n",
     "    return the remaining tokens joined into a content-only string.\n",
     "\n",
     "    Args:\n",
     "        aw: per-sentence attention weight vectors from run_attn_examples.\n",
     "        ids_to_decode: per-sentence token id lists, zero-padded in place by\n",
     "            run_attn_examples (the first 0 marks the padding boundary).\n",
     "        tokens_to_decode: per-sentence token string lists (unpadded).\n",
     "\n",
     "    Returns:\n",
     "        list of content sentence strings, aligned with aw.\n",
     "    \"\"\"\n",
     "    out_sen = [None for i in range(len(aw))]\n",
     "    for i in trange(len(aw)):\n",
     "        #topv, topi = aw[i].topk(len(inps_tokens[i]))\n",
     "        # Rank all real (non-pad) positions by attention weight.\n",
     "        # NOTE(review): .index(0) raises ValueError if a row has no padding,\n",
     "        # i.e. the sentence fills max_seq_len exactly - confirm inputs are shorter.\n",
     "        topv, topi = aw[i].topk(ids_to_decode[i].index(0))\n",
     "        topi = topi.tolist()\n",
     "        topv = topv.tolist()\n",
     "        #print(i,train_0[i])\n",
     "        #print(tokens_to_decode[i])\n",
     "        #print(\"Original Top Indexes = {}\".format(topi))\n",
     "        topi = [topi[j] for j in range(len(topi)) if ids_to_decode[i][topi[j]] not in not_to_remove_ids] # remove noun and common words\n",
     "        #print(\"After removing Nouns = {}\".format(topi))\n",
     "        topi = [topi[j] for j in range(len(topi)) if \"##\" not in tokens_to_decode[i][topi[j]]] # Remove half words\n",
     "        #print(\"After removing Half-words = {}\".format(topi))\n",
     "\n",
     "        # Decide how many top-attended tokens to delete: 1 when few candidates,\n",
     "        # 2 for mid-sized lists, 3 otherwise. An empty list falls through the\n",
     "        # elif branch and stays empty (nothing is removed).\n",
     "        if (len(topi) < 4 and len(topi) > 0):\n",
     "            topi = [topi[0]]\n",
     "        elif(len(topi) < 8):\n",
     "            topi = topi[:2]\n",
     "        else:\n",
     "            topi = topi[:3]\n",
     "\n",
     "        #print(\"Final Topi = {}\".format(topi))\n",
     "        # Walk ids up to the first pad (0), copying everything except the\n",
     "        # selected indexes; when an index is removed, also skip the run of\n",
     "        # following ## wordpiece continuations (tracked via count1).\n",
     "        final_indexes = []\n",
     "        count = 0\n",
     "        count1 = 0\n",
     "        #print(ids_to_decode[i], tokens_to_decode[i])\n",
     "        while ids_to_decode[i][count] != 0:\n",
     "            if count in topi:\n",
     "                while ids_to_decode[i][count + count1 + 1] != 0:\n",
     "                    if \"##\" in tokens_to_decode[i][count + count1 + 1]:\n",
     "                        count1 += 1\n",
     "                    else:\n",
     "                        break\n",
     "                count += count1\n",
     "                count1 = 0\n",
     "            else:\n",
     "                final_indexes.append(ids_to_decode[i][count])\n",
     "            count += 1\n",
     "\n",
     "        #print(final_indexes)\n",
     "        # Re-join surviving wordpieces and drop the special markers.\n",
     "        temp_out_sen = tokenizer.convert_ids_to_tokens(final_indexes)\n",
     "        temp_out_sen = \" \".join(temp_out_sen).replace(\" ##\", \"\").replace(\"[CLS]\",\"\").replace(\"[SEP]\",\"\")\n",
     "        #print(temp_out_sen, \"\\n\\n\")\n",
     "        out_sen[i] = temp_out_sen.strip()\n",
     "    \n",
     "    return out_sen"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Load all raw splits (one sentence per line; 0 = negative, 1 = positive files).\n",
     "train_0_data = read_file(train_0)\n",
     "train_1_data = read_file(train_1)\n",
     "dev_0_data = read_file(dev_0)\n",
     "dev_1_data = read_file(dev_1)\n",
     "test_0_data = read_file(test_0)\n",
     "test_1_data = read_file(test_1)\n",
     "ref_0_data = read_file(reference_0)\n",
     "ref_1_data = read_file(reference_1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Negative training split. layer=9, head=7 are magic numbers - presumably the\n",
     "# pre-selected best style head (see the processed_..._best_head dir name).\n",
     "# NOTE(review): this cell pattern repeats for every split; a helper function would avoid the duplication.\n",
     "aw, ids_to_decode, tokens_to_decode = run_attn_examples(train_0_data, layer=9, head=7, bs=128)\n",
     "train_0_out_sen = prepare_data(aw, ids_to_decode, tokens_to_decode)\n",
     "create_output_file(train_0_data, train_0_out_sen, train_0_out, sentiment=\"<NEG>\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Positive training split (same pipeline as the negative split above).\n",
     "aw, ids_to_decode, tokens_to_decode = run_attn_examples(train_1_data, layer=9, head=7, bs=128)\n",
     "train_1_out_sen = prepare_data(aw, ids_to_decode, tokens_to_decode)\n",
     "create_output_file(train_1_data, train_1_out_sen, train_1_out, sentiment=\"<POS>\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Negative dev split.\n",
     "aw, ids_to_decode, tokens_to_decode = run_attn_examples(dev_0_data, layer=9, head=7, bs=128)\n",
     "dev_0_out_sen = prepare_data(aw, ids_to_decode, tokens_to_decode)\n",
     "create_output_file(dev_0_data, dev_0_out_sen, dev_0_out, sentiment=\"<NEG>\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Positive dev split.\n",
     "aw, ids_to_decode, tokens_to_decode = run_attn_examples(dev_1_data, layer=9, head=7, bs=128)\n",
     "dev_1_out_sen = prepare_data(aw, ids_to_decode, tokens_to_decode)\n",
     "create_output_file(dev_1_data, dev_1_out_sen, dev_1_out, sentiment=\"<POS>\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Positive test split.\n",
     "aw, ids_to_decode, tokens_to_decode = run_attn_examples(test_1_data, layer=9, head=7, bs=128)\n",
     "test_1_out_sen = prepare_data(aw, ids_to_decode, tokens_to_decode)\n",
     "create_output_file(test_1_data, test_1_out_sen, test_1_out, sentiment=\"<POS>\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Negative test split.\n",
     "aw, ids_to_decode, tokens_to_decode = run_attn_examples(test_0_data, layer=9, head=7, bs=128)\n",
     "test_0_out_sen = prepare_data(aw, ids_to_decode, tokens_to_decode)\n",
     "create_output_file(test_0_data, test_0_out_sen, test_0_out, sentiment=\"<NEG>\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "aw, ids_to_decode, tokens_to_decode = run_attn_examples(ref_1_data, layer=9, head=7, bs=128)\n",
    "ref_1_out_sen = prepare_data(aw, ids_to_decode, tokens_to_decode)\n",
    "create_ref_output_file(ref_1_data, ref_1_out_sen, reference_1_out, sentiment=\"<NEG>\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "aw, ids_to_decode, tokens_to_decode = run_attn_examples(ref_0_data, layer=9, head=7, bs=128)\n",
    "ref_0_out_sen = prepare_data(aw, ids_to_decode, tokens_to_decode)\n",
    "create_ref_output_file(ref_0_data, ref_0_out_sen, reference_0_out, sentiment=\"<POS>\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
