{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "%reload_ext autoreload\n",
    "%autoreload 2\n",
    "%matplotlib inline\n",
    "\n",
    "import kenlm\n",
    "from tqdm import tqdm\n",
    "import fastText\n",
    "import pandas as pd\n",
    "from bleu import *\n",
    "import torch, os"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.\n"
     ]
    }
   ],
   "source": [
    "#bert classifier\n",
    "\n",
    "from tqdm import trange\n",
    "\n",
    "from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE\n",
    "from pytorch_pretrained_bert.modeling import BertForSequenceClassification, BertConfig, WEIGHTS_NAME, CONFIG_NAME\n",
    "from pytorch_pretrained_bert.tokenization import BertTokenizer\n",
    "\n",
    "model_cls = BertForSequenceClassification.from_pretrained(\"./bert_classifier/imagecaption\", num_labels=2)\n",
    "tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)\n",
    "\n",
    "model_cls.to('cuda')\n",
    "model_cls.eval()\n",
    "\n",
    "max_seq_len=70\n",
    "sm = torch.nn.Softmax(dim=-1)\n",
    "\n",
    "def evaluate_dev_set(input_sentences, labels, bs=32):\n",
    "    \"\"\"\n",
    "    To evaluate whole dataset and return accuracy\n",
    "    \"\"\"\n",
    "    ids = []\n",
    "    segment_ids = []\n",
    "    input_masks = []\n",
    "    pred_lt = []\n",
    "    for sen in input_sentences:\n",
    "        text_tokens = tokenizer.tokenize(sen)\n",
    "        if len(text_tokens) >= max_seq_len - 2:\n",
    "            text_tokens = text_tokens[:max_seq_len - 3]\n",
    "        tokens = [\"[CLS]\"] + text_tokens + [\"[SEP]\"]\n",
    "        temp_ids = tokenizer.convert_tokens_to_ids(tokens)\n",
    "        input_mask = [1] * len(temp_ids)\n",
    "        segment_id = [0] * len(temp_ids)\n",
    "        padding = [0] * (max_seq_len - len(temp_ids))\n",
    "\n",
    "        temp_ids += padding\n",
    "        input_mask += padding\n",
    "        segment_id += padding\n",
    "        \n",
    "        ids.append(temp_ids)\n",
    "        input_masks.append(input_mask)\n",
    "        segment_ids.append(segment_id)\n",
    "    \n",
    "    ids = torch.tensor(ids).to('cuda')\n",
    "    segment_ids = torch.tensor(segment_ids).to('cuda')\n",
    "    input_masks = torch.tensor(input_masks).to('cuda')\n",
    "    \n",
    "    steps = len(ids) // bs\n",
    "    \n",
    "    for i in trange(steps+1):\n",
    "        if i == steps:\n",
    "            temp_ids = ids[i * bs : len(ids)]\n",
    "            temp_segment_ids = segment_ids[i * bs: len(ids)]\n",
    "            temp_input_masks = input_masks[i * bs: len(ids)]\n",
    "        else:\n",
    "            temp_ids = ids[i * bs : i * bs + bs]\n",
    "            temp_segment_ids = segment_ids[i * bs: i * bs + bs]\n",
    "            temp_input_masks = input_masks[i * bs: i * bs + bs]\n",
    "        \n",
    "        with torch.no_grad():\n",
    "            preds = sm(model_cls(temp_ids, temp_segment_ids, temp_input_masks))\n",
    "        \n",
    "        #preds = preds.view(-1,bs)\n",
    "        try:\n",
    "            args = torch.argmax(preds, dim=-1)\n",
    "            pred_lt.extend(args.tolist())\n",
    "        except RuntimeError:\n",
    "            pass\n",
    "    accuracy = sum(np.array(pred_lt) == np.array(labels)) / len(labels)\n",
    "    \n",
    "    return accuracy, pred_lt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:pytorch_pretrained_bert.tokenization_gpt2:loading vocabulary file https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json from cache at /home/ubuntu/.pytorch_pretrained_bert/f2808208f9bec2320371a9f5f891c184ae0b674ef866b79c58177067d15732dd.1512018be4ba4e8726e41b9145129dc30651ea4fec86aa61f4b9f40bf94eac71\n",
      "INFO:pytorch_pretrained_bert.tokenization_gpt2:loading merges file https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt from cache at /home/ubuntu/.pytorch_pretrained_bert/d629f792e430b3c76a1291bb2766b0a047e36fae0588f9dbc1ae51decdff691b.70bec105b4158ed9a1747fea67a43f5dee97855c64d62b6ec3742f4cfdb5feda\n",
      "INFO:pytorch_pretrained_bert.modeling_gpt2:loading weights file https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-pytorch_model.bin from cache at /home/ubuntu/.pytorch_pretrained_bert/4295d67f022061768f4adc386234dbdb781c814c39662dd1662221c309962c55.778cf36f5c4e5d94c8cd9cefcf2a580c8643570eb327f0d4a1f007fab2acbdf1\n",
      "INFO:pytorch_pretrained_bert.modeling_gpt2:loading configuration file https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-config.json from cache at /home/ubuntu/.pytorch_pretrained_bert/4be02c5697d91738003fb1685c9872f284166aa32e061576bbe6aaeb95649fcf.085d5f6a8e7812ea05ff0e6ed0645ab2e75d80387ad55c1ad9806ee70d272f80\n",
      "INFO:pytorch_pretrained_bert.modeling_gpt2:Model config {\n",
      "  \"initializer_range\": 0.02,\n",
      "  \"layer_norm_epsilon\": 1e-05,\n",
      "  \"n_ctx\": 1024,\n",
      "  \"n_embd\": 768,\n",
      "  \"n_head\": 12,\n",
      "  \"n_layer\": 12,\n",
      "  \"n_positions\": 1024,\n",
      "  \"vocab_size\": 50257\n",
      "}\n",
      "\n"
     ]
    }
   ],
   "source": [
    "from pytorch_pretrained_bert import GPT2Tokenizer, GPT2Model, GPT2LMHeadModel\n",
    "\n",
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "import logging\n",
    "logging.basicConfig(level=logging.INFO)\n",
    "\n",
    "lm_tokenizer = GPT2Tokenizer.from_pretrained('gpt2')\n",
    "lm_model = GPT2LMHeadModel.from_pretrained('gpt2')\n",
    "path = os.path.join(os.getcwd(), \"GPT2/imagecaption_language_model_2.bin\")\n",
    "lm_model_state_dict = torch.load(path)\n",
    "lm_model.load_state_dict(lm_model_state_dict)\n",
    "lm_model.to(device)\n",
    "lm_model.eval()\n",
    "\n",
    "lm_loss = torch.nn.CrossEntropyLoss(ignore_index=-1, reduction='none')\n",
    "\n",
    "\n",
    "def calculate_ppl_gpt2(sentence_batch, bs=16):\n",
    "    # tokenize the sentences\n",
    "    tokenized_ids = [None for i in range(len(sentence_batch))]\n",
    "    ppl = [None for i in range(len(sentence_batch))]\n",
    "    \n",
    "    for i in range(len(sentence_batch)):\n",
    "        tokenized_ids[i] = lm_tokenizer.encode(sentence_batch[i])\n",
    "        \n",
    "    sen_lengths = [len(x) for x in tokenized_ids]\n",
    "    max_sen_length = max(sen_lengths)\n",
    "    \n",
    "    n_batch = len(sentence_batch)\n",
    "    input_ids = np.zeros( shape=(n_batch, max_sen_length), dtype=np.int64)\n",
    "    lm_labels = np.full(shape=(n_batch, max_sen_length), fill_value=-1)\n",
    "    \n",
    "    for i, tokens in enumerate(tokenized_ids):\n",
    "        input_ids[i, :len(tokens)] = tokens\n",
    "        lm_labels[i, :len(tokens)-1] = tokens[1:] \n",
    "    \n",
    "    input_ids = torch.tensor(input_ids)#.to(device)\n",
    "    lm_labels = torch.tensor(lm_labels)#.to(device)\n",
    "    \n",
    "    steps = n_batch // bs\n",
    "    \n",
    "    for i in range(steps+1):\n",
    "        \n",
    "        if i == steps:\n",
    "            temp_input_ids = input_ids[i * bs : n_batch]\n",
    "            temp_lm_labels = lm_labels[i * bs : n_batch]\n",
    "            temp_sen_lengths = sen_lengths[i * bs : n_batch]\n",
    "        else:\n",
    "            temp_input_ids = input_ids[i * bs : i * bs + bs]\n",
    "            temp_lm_labels = lm_labels[i * bs : i * bs + bs]\n",
    "            temp_sen_lengths = sen_lengths[i * bs : i * bs + bs]\n",
    "            \n",
    "        temp_input_ids = temp_input_ids.to('cuda')\n",
    "        temp_lm_labels = temp_lm_labels.to('cuda')\n",
    "            \n",
    "        with torch.no_grad():\n",
    "            lm_pred = lm_model(temp_input_ids)\n",
    "            \n",
    "        loss_val = lm_loss(lm_pred[0].view(-1, lm_pred[0].size(-1)), temp_lm_labels.view(-1))\n",
    "        normalized_loss = loss_val.view(len(temp_input_ids),-1).sum(dim= -1) / torch.tensor(temp_sen_lengths, dtype=torch.float32).to(device)\n",
    "        tmp_ppl = torch.exp(normalized_loss)\n",
    "        ppl[i * bs: i * bs + len(temp_input_ids)] = tmp_ppl.tolist()\n",
    "    \n",
    "    return  ppl\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "#fasttext classifier\n",
    "classifier_model = fastText.load_model('fasttextmodel/imagecaption_model.bin')\n",
    "\n",
    "#kenlm lm\n",
    "kenlm_lm = kenlm.Model('kenlmmodel/imagecaption.arpa')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  0%|          | 0/9 [00:00<?, ?it/s]\n",
      "  0%|          | 0/10 [00:00<?, ?it/s]\u001b[A\n",
      " 10%|█         | 1/10 [00:00<00:03,  2.73it/s]\u001b[A\n",
      " 20%|██        | 2/10 [00:00<00:02,  2.94it/s]\u001b[A\n",
      " 30%|███       | 3/10 [00:00<00:02,  3.10it/s]\u001b[A\n",
      " 40%|████      | 4/10 [00:01<00:01,  3.19it/s]\u001b[A\n",
      " 50%|█████     | 5/10 [00:01<00:01,  3.23it/s]\u001b[A\n",
      " 60%|██████    | 6/10 [00:01<00:01,  3.26it/s]\u001b[A\n",
      " 70%|███████   | 7/10 [00:02<00:00,  3.29it/s]\u001b[A\n",
      " 80%|████████  | 8/10 [00:02<00:00,  3.32it/s]\u001b[A\n",
      " 90%|█████████ | 9/10 [00:02<00:00,  3.35it/s]\u001b[A\n",
      "100%|██████████| 10/10 [00:02<00:00,  3.56it/s]\u001b[A\n",
      " 11%|█         | 1/9 [00:04<00:33,  4.18s/it]\n",
      "  0%|          | 0/10 [00:00<?, ?it/s]\u001b[A\n",
      " 10%|█         | 1/10 [00:00<00:02,  3.81it/s]\u001b[A\n",
      " 20%|██        | 2/10 [00:00<00:02,  3.71it/s]\u001b[A\n",
      " 30%|███       | 3/10 [00:00<00:01,  3.65it/s]\u001b[A\n",
      " 40%|████      | 4/10 [00:01<00:01,  3.63it/s]\u001b[A\n",
      " 50%|█████     | 5/10 [00:01<00:01,  3.62it/s]\u001b[A\n",
      " 60%|██████    | 6/10 [00:01<00:01,  3.60it/s]\u001b[A\n",
      " 70%|███████   | 7/10 [00:01<00:00,  3.59it/s]\u001b[A\n",
      " 80%|████████  | 8/10 [00:02<00:00,  3.58it/s]\u001b[A\n",
      " 90%|█████████ | 9/10 [00:02<00:00,  3.58it/s]\u001b[A\n",
      "100%|██████████| 10/10 [00:02<00:00,  3.80it/s]\u001b[A\n",
      " 22%|██▏       | 2/9 [00:08<00:29,  4.16s/it]\n",
      "  0%|          | 0/10 [00:00<?, ?it/s]\u001b[A\n",
      " 10%|█         | 1/10 [00:00<00:02,  3.74it/s]\u001b[A\n",
      " 20%|██        | 2/10 [00:00<00:02,  3.67it/s]\u001b[A\n",
      " 30%|███       | 3/10 [00:00<00:01,  3.62it/s]\u001b[A\n",
      " 40%|████      | 4/10 [00:01<00:01,  3.60it/s]\u001b[A\n",
      " 50%|█████     | 5/10 [00:01<00:01,  3.60it/s]\u001b[A\n",
      " 60%|██████    | 6/10 [00:01<00:01,  3.59it/s]\u001b[A\n",
      " 70%|███████   | 7/10 [00:01<00:00,  3.57it/s]\u001b[A\n",
      " 80%|████████  | 8/10 [00:02<00:00,  3.57it/s]\u001b[A\n",
      " 90%|█████████ | 9/10 [00:02<00:00,  3.56it/s]\u001b[A\n",
      "100%|██████████| 10/10 [00:02<00:00,  3.78it/s]\u001b[A\n",
      " 33%|███▎      | 3/9 [00:12<00:24,  4.15s/it]\n",
      "  0%|          | 0/10 [00:00<?, ?it/s]\u001b[A\n",
      " 10%|█         | 1/10 [00:00<00:02,  3.71it/s]\u001b[A\n",
      " 20%|██        | 2/10 [00:00<00:02,  3.64it/s]\u001b[A\n",
      " 30%|███       | 3/10 [00:00<00:01,  3.61it/s]\u001b[A\n",
      " 40%|████      | 4/10 [00:01<00:01,  3.59it/s]\u001b[A\n",
      " 50%|█████     | 5/10 [00:01<00:01,  3.58it/s]\u001b[A\n",
      " 60%|██████    | 6/10 [00:01<00:01,  3.57it/s]\u001b[A\n",
      " 70%|███████   | 7/10 [00:01<00:00,  3.57it/s]\u001b[A\n",
      " 80%|████████  | 8/10 [00:02<00:00,  3.56it/s]\u001b[A\n",
      " 90%|█████████ | 9/10 [00:02<00:00,  3.56it/s]\u001b[A\n",
      "100%|██████████| 10/10 [00:02<00:00,  3.77it/s]\u001b[A\n",
      " 44%|████▍     | 4/9 [00:16<00:20,  4.14s/it]\n",
      "  0%|          | 0/10 [00:00<?, ?it/s]\u001b[A\n",
      " 10%|█         | 1/10 [00:00<00:02,  3.79it/s]\u001b[A\n",
      " 20%|██        | 2/10 [00:00<00:02,  3.68it/s]\u001b[A\n",
      " 30%|███       | 3/10 [00:00<00:01,  3.62it/s]\u001b[A\n",
      " 40%|████      | 4/10 [00:01<00:01,  3.61it/s]\u001b[A\n",
      " 50%|█████     | 5/10 [00:01<00:01,  3.60it/s]\u001b[A\n",
      " 60%|██████    | 6/10 [00:01<00:01,  3.58it/s]\u001b[A\n",
      " 70%|███████   | 7/10 [00:01<00:00,  3.58it/s]\u001b[A\n",
      " 80%|████████  | 8/10 [00:02<00:00,  3.58it/s]\u001b[A\n",
      " 90%|█████████ | 9/10 [00:02<00:00,  3.57it/s]\u001b[A\n",
      "100%|██████████| 10/10 [00:02<00:00,  3.78it/s]\u001b[A\n",
      " 56%|█████▌    | 5/9 [00:20<00:16,  4.15s/it]\n",
      "  0%|          | 0/10 [00:00<?, ?it/s]\u001b[A\n",
      " 10%|█         | 1/10 [00:00<00:02,  3.73it/s]\u001b[A\n",
      " 20%|██        | 2/10 [00:00<00:02,  3.65it/s]\u001b[A\n",
      " 30%|███       | 3/10 [00:00<00:01,  3.61it/s]\u001b[A\n",
      " 40%|████      | 4/10 [00:01<00:01,  3.59it/s]\u001b[A\n",
      " 50%|█████     | 5/10 [00:01<00:01,  3.58it/s]\u001b[A\n",
      " 60%|██████    | 6/10 [00:01<00:01,  3.57it/s]\u001b[A\n",
      " 70%|███████   | 7/10 [00:01<00:00,  3.56it/s]\u001b[A\n",
      " 80%|████████  | 8/10 [00:02<00:00,  3.56it/s]\u001b[A\n",
      " 90%|█████████ | 9/10 [00:02<00:00,  3.56it/s]\u001b[A\n",
      "100%|██████████| 10/10 [00:02<00:00,  3.77it/s]\u001b[A\n",
      " 67%|██████▋   | 6/9 [00:25<00:12,  4.21s/it]\n",
      "  0%|          | 0/10 [00:00<?, ?it/s]\u001b[A\n",
      " 10%|█         | 1/10 [00:00<00:02,  3.67it/s]\u001b[A\n",
      " 20%|██        | 2/10 [00:00<00:02,  3.63it/s]\u001b[A\n",
      " 30%|███       | 3/10 [00:00<00:01,  3.60it/s]\u001b[A\n",
      " 40%|████      | 4/10 [00:01<00:01,  3.58it/s]\u001b[A\n",
      " 50%|█████     | 5/10 [00:01<00:01,  3.57it/s]\u001b[A\n",
      " 60%|██████    | 6/10 [00:01<00:01,  3.56it/s]\u001b[A\n",
      " 70%|███████   | 7/10 [00:01<00:00,  3.56it/s]\u001b[A\n",
      " 80%|████████  | 8/10 [00:02<00:00,  3.56it/s]\u001b[A\n",
      " 90%|█████████ | 9/10 [00:02<00:00,  3.56it/s]\u001b[A\n",
      "100%|██████████| 10/10 [00:02<00:00,  3.77it/s]\u001b[A\n",
      " 78%|███████▊  | 7/9 [00:29<00:08,  4.23s/it]\n",
      "  0%|          | 0/9 [00:00<?, ?it/s]\u001b[A\n",
      " 11%|█         | 1/9 [00:00<00:02,  3.58it/s]\u001b[A\n",
      " 22%|██▏       | 2/9 [00:00<00:01,  3.57it/s]\u001b[A\n",
      " 33%|███▎      | 3/9 [00:00<00:01,  3.55it/s]\u001b[A\n",
      " 44%|████▍     | 4/9 [00:01<00:01,  3.55it/s]\u001b[A\n",
      " 56%|█████▌    | 5/9 [00:01<00:01,  3.55it/s]\u001b[A\n",
      " 67%|██████▋   | 6/9 [00:01<00:00,  3.54it/s]\u001b[A\n",
      " 78%|███████▊  | 7/9 [00:01<00:00,  3.54it/s]\u001b[A\n",
      " 89%|████████▉ | 8/9 [00:02<00:00,  3.54it/s]\u001b[A\n",
      "100%|██████████| 9/9 [00:02<00:00,  3.57it/s]\u001b[A\n",
      " 89%|████████▉ | 8/9 [00:37<00:04,  4.63s/it]\n",
      "  0%|          | 0/10 [00:00<?, ?it/s]\u001b[A\n",
      " 10%|█         | 1/10 [00:00<00:02,  3.47it/s]\u001b[A\n",
      " 20%|██        | 2/10 [00:00<00:02,  3.54it/s]\u001b[A\n",
      " 30%|███       | 3/10 [00:00<00:01,  3.53it/s]\u001b[A\n",
      " 40%|████      | 4/10 [00:01<00:01,  3.54it/s]\u001b[A\n",
      " 50%|█████     | 5/10 [00:01<00:01,  3.54it/s]\u001b[A\n",
      " 60%|██████    | 6/10 [00:01<00:01,  3.54it/s]\u001b[A\n",
      " 70%|███████   | 7/10 [00:01<00:00,  3.54it/s]\u001b[A\n",
      " 80%|████████  | 8/10 [00:02<00:00,  3.54it/s]\u001b[A\n",
      " 90%|█████████ | 9/10 [00:02<00:00,  3.53it/s]\u001b[A\n",
      "100%|██████████| 10/10 [00:02<00:00,  3.75it/s]\u001b[A\n",
      "100%|██████████| 9/9 [00:41<00:00,  4.60s/it]\n"
     ]
    }
   ],
   "source": [
    "df = pd.read_csv('imagecaption_all_model_prediction_1.csv', header = None)\n",
    "label = 0\n",
    "label_str = '__label__0'\n",
    "\n",
    "list_sentences = df[1:len(df)].values.tolist()\n",
    "\n",
    "list_sentences_source = []\n",
    "list_sentences_human = []\n",
    "for list_sentance in list_sentences:\n",
    "    list_sentences_source.append(list_sentance[0])\n",
    "    list_sentences_human.append(list_sentance[-1])\n",
    "\n",
    "matrics1 = []\n",
    "for i in tqdm(range(0, len(list_sentences[0]))):\n",
    "    bleu_s = 0\n",
    "    bleu_r = 0\n",
    "    fasttext_c = 0\n",
    "    kenlm_ppl = 0\n",
    "    gpt2_ppl = 0\n",
    "    \n",
    "    sentences = []\n",
    "    for j in range(0, len(list_sentences)):\n",
    "        if(pd.isnull(list_sentences[j][i])):\n",
    "            continue\n",
    "        sentences.append(list_sentences[j][i])\n",
    "        \n",
    "    fasttext_labels = classifier_model.predict(sentences)\n",
    "    \n",
    "    total_sentences = len(sentences)\n",
    "\n",
    "    bleu_s = get_bleu(list_sentences_source, sentences)\n",
    "    bleu_r = get_bleu(list_sentences_human, sentences)\n",
    "\n",
    "    for _, sentence in enumerate(sentences):\n",
    "        if(fasttext_labels[0][_][0]==label_str):\n",
    "            fasttext_c += 1\n",
    "        kenlm_ppl += kenlm_lm.perplexity(sentence)\n",
    "        \n",
    "    labels_list = [label] * len(sentences)\n",
    "\n",
    "    bert_accuracy, pred_label_list = evaluate_dev_set(sentences, labels_list)\n",
    "    ppl_list_gpt2 = calculate_ppl_gpt2(sentences)\n",
    "    \n",
    "    for j in range(0, len(ppl_list_gpt2)):\n",
    "        gpt2_ppl += ppl_list_gpt2[j]\n",
    "\n",
    "    matrics1.append([bleu_s , bleu_r , fasttext_c/total_sentences , kenlm_ppl/total_sentences, bert_accuracy, gpt2_ppl/len(ppl_list_gpt2)])\n",
    "    \n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 9/9 [00:14<00:00,  1.66s/it]\n"
     ]
    }
   ],
   "source": [
    "# for i in tqdm(range(0, len(list_sentences[0]))):\n",
    "#     gpt2_ppl = 0\n",
    "    \n",
    "#     sentences = []\n",
    "#     for j in range(0, len(list_sentences)):\n",
    "#         if(pd.isnull(list_sentences[j][i])):\n",
    "#             continue\n",
    "#         sentences.append(list_sentences[j][i])\n",
    "    \n",
    "#     ppl_list_gpt2 = calculate_ppl_gpt2(sentences)\n",
    "#     for j in range(0, len(ppl_list_gpt2)):\n",
    "#         gpt2_ppl += ppl_list_gpt2[j]\n",
    "\n",
    "#     matrics1[i].append(gpt2_ppl/len(ppl_list_gpt2))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  0%|          | 0/9 [00:00<?, ?it/s]\n",
      "  0%|          | 0/10 [00:00<?, ?it/s]\u001b[A\n",
      " 10%|█         | 1/10 [00:00<00:03,  2.88it/s]\u001b[A\n",
      " 20%|██        | 2/10 [00:00<00:02,  3.09it/s]\u001b[A\n",
      " 30%|███       | 3/10 [00:00<00:02,  3.19it/s]\u001b[A\n",
      " 40%|████      | 4/10 [00:01<00:01,  3.25it/s]\u001b[A\n",
      " 50%|█████     | 5/10 [00:01<00:01,  3.29it/s]\u001b[A\n",
      " 60%|██████    | 6/10 [00:01<00:01,  3.33it/s]\u001b[A\n",
      " 70%|███████   | 7/10 [00:02<00:00,  3.35it/s]\u001b[A\n",
      " 80%|████████  | 8/10 [00:02<00:00,  3.37it/s]\u001b[A\n",
      " 90%|█████████ | 9/10 [00:02<00:00,  3.39it/s]\u001b[A\n",
      "100%|██████████| 10/10 [00:02<00:00,  3.60it/s]\u001b[A\n",
      " 11%|█         | 1/9 [00:04<00:33,  4.15s/it]\n",
      "  0%|          | 0/10 [00:00<?, ?it/s]\u001b[A\n",
      " 10%|█         | 1/10 [00:00<00:02,  3.78it/s]\u001b[A\n",
      " 20%|██        | 2/10 [00:00<00:02,  3.70it/s]\u001b[A\n",
      " 30%|███       | 3/10 [00:00<00:01,  3.63it/s]\u001b[A\n",
      " 40%|████      | 4/10 [00:01<00:01,  3.60it/s]\u001b[A\n",
      " 50%|█████     | 5/10 [00:01<00:01,  3.58it/s]\u001b[A\n",
      " 60%|██████    | 6/10 [00:01<00:01,  3.58it/s]\u001b[A\n",
      " 70%|███████   | 7/10 [00:01<00:00,  3.57it/s]\u001b[A\n",
      " 80%|████████  | 8/10 [00:02<00:00,  3.56it/s]\u001b[A\n",
      " 90%|█████████ | 9/10 [00:02<00:00,  3.56it/s]\u001b[A\n",
      "100%|██████████| 10/10 [00:02<00:00,  3.77it/s]\u001b[A\n",
      " 22%|██▏       | 2/9 [00:08<00:29,  4.15s/it]\n",
      "  0%|          | 0/10 [00:00<?, ?it/s]\u001b[A\n",
      " 10%|█         | 1/10 [00:00<00:02,  3.71it/s]\u001b[A\n",
      " 20%|██        | 2/10 [00:00<00:02,  3.64it/s]\u001b[A\n",
      " 30%|███       | 3/10 [00:00<00:01,  3.60it/s]\u001b[A\n",
      " 40%|████      | 4/10 [00:01<00:01,  3.58it/s]\u001b[A\n",
      " 50%|█████     | 5/10 [00:01<00:01,  3.57it/s]\u001b[A\n",
      " 60%|██████    | 6/10 [00:01<00:01,  3.56it/s]\u001b[A\n",
      " 70%|███████   | 7/10 [00:01<00:00,  3.56it/s]\u001b[A\n",
      " 80%|████████  | 8/10 [00:02<00:00,  3.55it/s]\u001b[A\n",
      " 90%|█████████ | 9/10 [00:02<00:00,  3.55it/s]\u001b[A\n",
      "100%|██████████| 10/10 [00:02<00:00,  3.76it/s]\u001b[A\n",
      " 33%|███▎      | 3/9 [00:12<00:24,  4.16s/it]\n",
      "  0%|          | 0/10 [00:00<?, ?it/s]\u001b[A\n",
      " 10%|█         | 1/10 [00:00<00:02,  3.64it/s]\u001b[A\n",
      " 20%|██        | 2/10 [00:00<00:02,  3.61it/s]\u001b[A\n",
      " 30%|███       | 3/10 [00:00<00:01,  3.57it/s]\u001b[A\n",
      " 40%|████      | 4/10 [00:01<00:01,  3.55it/s]\u001b[A\n",
      " 50%|█████     | 5/10 [00:01<00:01,  3.55it/s]\u001b[A\n",
      " 60%|██████    | 6/10 [00:01<00:01,  3.55it/s]\u001b[A\n",
      " 70%|███████   | 7/10 [00:01<00:00,  3.54it/s]\u001b[A\n",
      " 80%|████████  | 8/10 [00:02<00:00,  3.54it/s]\u001b[A\n",
      " 90%|█████████ | 9/10 [00:02<00:00,  3.54it/s]\u001b[A\n",
      "100%|██████████| 10/10 [00:02<00:00,  3.75it/s]\u001b[A\n",
      " 44%|████▍     | 4/9 [00:17<00:21,  4.28s/it]\n",
      "  0%|          | 0/10 [00:00<?, ?it/s]\u001b[A\n",
      " 10%|█         | 1/10 [00:00<00:02,  3.56it/s]\u001b[A\n",
      " 20%|██        | 2/10 [00:00<00:02,  3.57it/s]\u001b[A\n",
      " 30%|███       | 3/10 [00:00<00:01,  3.55it/s]\u001b[A\n",
      " 40%|████      | 4/10 [00:01<00:01,  3.54it/s]\u001b[A\n",
      " 50%|█████     | 5/10 [00:01<00:01,  3.53it/s]\u001b[A\n",
      " 60%|██████    | 6/10 [00:01<00:01,  3.53it/s]\u001b[A\n",
      " 70%|███████   | 7/10 [00:01<00:00,  3.53it/s]\u001b[A\n",
      " 80%|████████  | 8/10 [00:02<00:00,  3.53it/s]\u001b[A\n",
      " 90%|█████████ | 9/10 [00:02<00:00,  3.53it/s]\u001b[A\n",
      "100%|██████████| 10/10 [00:02<00:00,  3.74it/s]\u001b[A\n",
      " 56%|█████▌    | 5/9 [00:21<00:17,  4.30s/it]\n",
      "  0%|          | 0/10 [00:00<?, ?it/s]\u001b[A\n",
      " 10%|█         | 1/10 [00:00<00:02,  3.64it/s]\u001b[A\n",
      " 20%|██        | 2/10 [00:00<00:02,  3.62it/s]\u001b[A\n",
      " 30%|███       | 3/10 [00:00<00:01,  3.56it/s]\u001b[A\n",
      " 40%|████      | 4/10 [00:01<00:01,  3.55it/s]\u001b[A\n",
      " 50%|█████     | 5/10 [00:01<00:01,  3.54it/s]\u001b[A\n",
      " 60%|██████    | 6/10 [00:01<00:01,  3.54it/s]\u001b[A\n",
      " 70%|███████   | 7/10 [00:01<00:00,  3.54it/s]\u001b[A\n",
      " 80%|████████  | 8/10 [00:02<00:00,  3.53it/s]\u001b[A\n",
      " 90%|█████████ | 9/10 [00:02<00:00,  3.53it/s]\u001b[A\n",
      "100%|██████████| 10/10 [00:02<00:00,  3.74it/s]\u001b[A\n",
      " 67%|██████▋   | 6/9 [00:26<00:13,  4.38s/it]\n",
      "  0%|          | 0/10 [00:00<?, ?it/s]\u001b[A\n",
      " 10%|█         | 1/10 [00:00<00:02,  3.70it/s]\u001b[A\n",
      " 20%|██        | 2/10 [00:00<00:02,  3.62it/s]\u001b[A\n",
      " 30%|███       | 3/10 [00:00<00:01,  3.56it/s]\u001b[A\n",
      " 40%|████      | 4/10 [00:01<00:01,  3.54it/s]\u001b[A\n",
      " 50%|█████     | 5/10 [00:01<00:01,  3.54it/s]\u001b[A\n",
      " 60%|██████    | 6/10 [00:01<00:01,  3.53it/s]\u001b[A\n",
      " 70%|███████   | 7/10 [00:01<00:00,  3.53it/s]\u001b[A\n",
      " 80%|████████  | 8/10 [00:02<00:00,  3.53it/s]\u001b[A\n",
      " 90%|█████████ | 9/10 [00:02<00:00,  3.52it/s]\u001b[A\n",
      "100%|██████████| 10/10 [00:02<00:00,  3.73it/s]\u001b[A\n",
      " 78%|███████▊  | 7/9 [00:30<00:08,  4.36s/it]\n",
      "  0%|          | 0/10 [00:00<?, ?it/s]\u001b[A\n",
      " 10%|█         | 1/10 [00:00<00:02,  3.63it/s]\u001b[A\n",
      " 20%|██        | 2/10 [00:00<00:02,  3.59it/s]\u001b[A\n",
      " 30%|███       | 3/10 [00:00<00:01,  3.55it/s]\u001b[A\n",
      " 40%|████      | 4/10 [00:01<00:01,  3.55it/s]\u001b[A\n",
      " 50%|█████     | 5/10 [00:01<00:01,  3.55it/s]\u001b[A\n",
      " 60%|██████    | 6/10 [00:01<00:01,  3.54it/s]\u001b[A\n",
      " 70%|███████   | 7/10 [00:01<00:00,  3.53it/s]\u001b[A\n",
      " 80%|████████  | 8/10 [00:02<00:00,  3.53it/s]\u001b[A\n",
      " 90%|█████████ | 9/10 [00:02<00:00,  3.53it/s]\u001b[A\n",
      " 89%|████████▉ | 8/9 [00:38<00:04,  4.81s/it]\n",
      "  0%|          | 0/10 [00:00<?, ?it/s]\u001b[A\n",
      " 10%|█         | 1/10 [00:00<00:02,  3.61it/s]\u001b[A\n",
      " 20%|██        | 2/10 [00:00<00:02,  3.59it/s]\u001b[A\n",
      " 30%|███       | 3/10 [00:00<00:01,  3.56it/s]\u001b[A\n",
      " 40%|████      | 4/10 [00:01<00:01,  3.55it/s]\u001b[A\n",
      " 50%|█████     | 5/10 [00:01<00:01,  3.54it/s]\u001b[A\n",
      " 60%|██████    | 6/10 [00:01<00:01,  3.53it/s]\u001b[A\n",
      " 70%|███████   | 7/10 [00:01<00:00,  3.53it/s]\u001b[A\n",
      " 80%|████████  | 8/10 [00:02<00:00,  3.53it/s]\u001b[A\n",
      " 90%|█████████ | 9/10 [00:02<00:00,  3.52it/s]\u001b[A\n",
      "100%|██████████| 10/10 [00:02<00:00,  3.74it/s]\u001b[A\n",
      "100%|██████████| 9/9 [00:42<00:00,  4.77s/it]\n"
     ]
    }
   ],
   "source": [
    "df = pd.read_csv('imagecaption_all_model_prediction_0.csv', header = None)\n",
    "label = 1\n",
    "label_str = '__label__1'\n",
    "\n",
    "list_sentences = df[1:len(df)].values.tolist()\n",
    "\n",
    "list_sentences_source = []\n",
    "list_sentences_human = []\n",
    "for list_sentance in list_sentences:\n",
    "    list_sentences_source.append(list_sentance[0])\n",
    "    list_sentences_human.append(list_sentance[-1])\n",
    "\n",
    "matrics0 = []\n",
    "for i in tqdm(range(0, len(list_sentences[0]))):\n",
    "    bleu_s = 0\n",
    "    bleu_r = 0\n",
    "    fasttext_c = 0\n",
    "    kenlm_ppl = 0\n",
    "    gpt2_ppl = 0\n",
    "\n",
    "    sentences = []\n",
    "    for j in range(0, len(list_sentences)):\n",
    "        if(pd.isnull(list_sentences[j][i])):\n",
    "            continue\n",
    "        sentences.append(list_sentences[j][i])\n",
    "        \n",
    "    fasttext_labels = classifier_model.predict(sentences)\n",
    "    \n",
    "    total_sentences = len(sentences)\n",
    "    \n",
    "    bleu_s = get_bleu(list_sentences_source, sentences)\n",
    "    bleu_r = get_bleu(list_sentences_human, sentences)\n",
    "    \n",
    "    for _, sentence in enumerate(sentences):\n",
    "        if(fasttext_labels[0][_][0]==label_str):\n",
    "            fasttext_c += 1\n",
    "        kenlm_ppl += kenlm_lm.perplexity(sentence)\n",
    "        \n",
    "    labels_list = [label] * len(sentences)\n",
    "    bert_accuracy, pred_label_list = evaluate_dev_set(sentences, labels_list)\n",
    "    \n",
    "    ppl_list_gpt2 = calculate_ppl_gpt2(sentences)\n",
    "\n",
    "    for j in range(0, len(ppl_list_gpt2)):\n",
    "        gpt2_ppl += ppl_list_gpt2[j]\n",
    "        \n",
    "    matrics0.append([bleu_s , bleu_r , fasttext_c/total_sentences , kenlm_ppl/total_sentences, bert_accuracy, gpt2_ppl/len(ppl_list_gpt2)])\n",
    "    \n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[100.0, 39.87944345358151, 0.98, 71.11467744760239, 0.6933333333333334, 34.39835715134939]\n",
      "[24.92056428895913, 21.76467691499278, 1.0, 7.267386331689432, 1.0, 7.927897548675537]\n",
      "[53.76071199348269, 29.713073271739376, 0.9966666666666667, 51.69677062697555, 0.59, 74.064298842748]\n",
      "[49.09943533601552, 26.90944662837605, 0.9966666666666667, 30.49334370915424, 0.7366666666666667, 41.53330832163493]\n",
      "[59.311072999300876, 36.10505325823188, 0.9966666666666667, 48.23631147057629, 0.8733333333333333, 51.96432491461436]\n",
      "[47.074873410530884, 38.836287631862426, 1.0, 23.240615692689445, 0.9866666666666667, 27.62262170950572]\n",
      "[69.90959012446687, 42.80293604104987, 0.8966666666666666, 204.70766625865213, 0.69, 26.737085501352947]\n",
      "[12.89936397051252, 19.5490483856569, 0.9728813559322034, 659.302723232918, 0.7864406779661017, 43.01603642116159]\n",
      "[36.256820971076934, 100.0, 0.9933333333333333, 109.83743714546452, 0.82, 36.76217499256134]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[None, None, None, None, None, None, None, None, None]"
      ]
     },
     "execution_count": 23,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "[print(i) for i in matrics0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[100.0, 39.59805219279706, 0.02, 71.11467744760239, 0.30666666666666664, 34.39835715134939]\n",
      "[23.236296228468905, 23.10680525628537, 0.016666666666666666, 10.131880712102145, 0.93, 12.273937996228536]\n",
      "[53.77309695241419, 29.392399961802912, 0.023333333333333334, 55.34128283008179, 0.49666666666666665, 86.45355588595072]\n",
      "[48.23012622194243, 27.05896019128694, 0.03, 28.247175534833158, 0.5833333333333334, 39.369440463383995]\n",
      "[58.8079940029184, 36.499275332307356, 0.15333333333333332, 42.9083329745895, 0.7733333333333333, 53.05547393957774]\n",
      "[51.03982230583542, 37.52242985249541, 0.35, 23.519130799891176, 0.9133333333333333, 29.930170119603474]\n",
      "[66.64320101223511, 43.321603338624456, 0.22333333333333333, 101.63720073179675, 0.72, 30.990958212216697]\n",
      "[13.346641728109384, 18.38065693490144, 0.07317073170731707, 811.8717184392814, 0.5923344947735192, 46.99428256255825]\n",
      "[36.635239934223485, 100.0, 0.11666666666666667, 139.35899225692074, 0.79, 46.08650080680847]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[None, None, None, None, None, None, None, None, None]"
      ]
     },
     "execution_count": 24,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "[print(i) for i in matrics1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [],
   "source": [
    "matricsavg = (np.array(matrics0)+np.array(matrics1))/2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [],
   "source": [
    "df_res0 = pd.DataFrame(matrics0, columns=['BLEU_source','BLEU_human','fasttext_classifier','klm_ppl', 'BERT_classifier', 'gpt2_ppl'])\n",
    "df_res1 = pd.DataFrame(matrics1, columns=['BLEU_source','BLEU_human','fasttext_classifier','klm_ppl', 'BERT_classifier', 'gpt2_ppl'])\n",
    "df_resavg = pd.DataFrame(matricsavg, columns=['BLEU_source','BLEU_human','fasttext_classifier','klm_ppl', 'BERT_classifier', 'gpt2_ppl'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Prepend a 'model' name column (position 0) to each result frame.\n",
    "# NOTE(review): df[0:1].values.tolist() takes the FIRST ROW of df as the list\n",
    "# of model names -- this assumes df (defined earlier in the notebook) stores\n",
    "# model names row-wise and its length matches the 9 metric rows; confirm.\n",
    "models_list = df[0:1].values.tolist()\n",
    "#df_res.insert(loc=0, column='GLEU_score', value=gleu_list)\n",
    "df_res0.insert(loc=0, column='model', value=models_list[0])\n",
    "df_res1.insert(loc=0, column='model', value=models_list[0])\n",
    "df_resavg.insert(loc=0, column='model', value=models_list[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>model</th>\n",
       "      <th>BLEU_source</th>\n",
       "      <th>BLEU_human</th>\n",
       "      <th>fasttext_classifier</th>\n",
       "      <th>klm_ppl</th>\n",
       "      <th>BERT_classifier</th>\n",
       "      <th>gpt2_ppl</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>Source</td>\n",
       "      <td>100.000000</td>\n",
       "      <td>39.738748</td>\n",
       "      <td>0.500000</td>\n",
       "      <td>71.114677</td>\n",
       "      <td>0.500000</td>\n",
       "      <td>34.398357</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>CROSSALIGNED</td>\n",
       "      <td>24.078430</td>\n",
       "      <td>22.435741</td>\n",
       "      <td>0.508333</td>\n",
       "      <td>8.699634</td>\n",
       "      <td>0.965000</td>\n",
       "      <td>10.100918</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>STYLEEMBEDDING</td>\n",
       "      <td>53.766904</td>\n",
       "      <td>29.552737</td>\n",
       "      <td>0.510000</td>\n",
       "      <td>53.519027</td>\n",
       "      <td>0.543333</td>\n",
       "      <td>80.258927</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>MULTIDECODER</td>\n",
       "      <td>48.664781</td>\n",
       "      <td>26.984203</td>\n",
       "      <td>0.513333</td>\n",
       "      <td>29.370260</td>\n",
       "      <td>0.660000</td>\n",
       "      <td>40.451374</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>DELETEONLY</td>\n",
       "      <td>59.059534</td>\n",
       "      <td>36.302164</td>\n",
       "      <td>0.575000</td>\n",
       "      <td>45.572322</td>\n",
       "      <td>0.823333</td>\n",
       "      <td>52.509899</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5</th>\n",
       "      <td>DELETEANDRETRIEVE</td>\n",
       "      <td>49.057348</td>\n",
       "      <td>38.179359</td>\n",
       "      <td>0.675000</td>\n",
       "      <td>23.379873</td>\n",
       "      <td>0.950000</td>\n",
       "      <td>28.776396</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>6</th>\n",
       "      <td>BERT_DEL</td>\n",
       "      <td>68.276396</td>\n",
       "      <td>43.062270</td>\n",
       "      <td>0.560000</td>\n",
       "      <td>153.172433</td>\n",
       "      <td>0.705000</td>\n",
       "      <td>28.864022</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>7</th>\n",
       "      <td>BERT_RET_TFIDF</td>\n",
       "      <td>13.123003</td>\n",
       "      <td>18.964853</td>\n",
       "      <td>0.523026</td>\n",
       "      <td>735.587221</td>\n",
       "      <td>0.689388</td>\n",
       "      <td>45.005159</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>8</th>\n",
       "      <td>HUMAN</td>\n",
       "      <td>36.446030</td>\n",
       "      <td>100.000000</td>\n",
       "      <td>0.555000</td>\n",
       "      <td>124.598215</td>\n",
       "      <td>0.805000</td>\n",
       "      <td>41.424338</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "               model  BLEU_source  BLEU_human  fasttext_classifier  \\\n",
       "0             Source   100.000000   39.738748             0.500000   \n",
       "1       CROSSALIGNED    24.078430   22.435741             0.508333   \n",
       "2     STYLEEMBEDDING    53.766904   29.552737             0.510000   \n",
       "3       MULTIDECODER    48.664781   26.984203             0.513333   \n",
       "4         DELETEONLY    59.059534   36.302164             0.575000   \n",
       "5  DELETEANDRETRIEVE    49.057348   38.179359             0.675000   \n",
       "6           BERT_DEL    68.276396   43.062270             0.560000   \n",
       "7     BERT_RET_TFIDF    13.123003   18.964853             0.523026   \n",
       "8              HUMAN    36.446030  100.000000             0.555000   \n",
       "\n",
       "      klm_ppl  BERT_classifier   gpt2_ppl  \n",
       "0   71.114677         0.500000  34.398357  \n",
       "1    8.699634         0.965000  10.100918  \n",
       "2   53.519027         0.543333  80.258927  \n",
       "3   29.370260         0.660000  40.451374  \n",
       "4   45.572322         0.823333  52.509899  \n",
       "5   23.379873         0.950000  28.776396  \n",
       "6  153.172433         0.705000  28.864022  \n",
       "7  735.587221         0.689388  45.005159  \n",
       "8  124.598215         0.805000  41.424338  "
      ]
     },
     "execution_count": 28,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Show the averaged metrics table (bare last expression -> rich HTML display).\n",
    "df_resavg"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist direction-0 metrics. The output directory is spelled 'matrics'\n",
    "# (sic) -- it is a runtime path, so left untouched; rename on disk if fixed.\n",
    "df_res0.to_csv('matrics/imagecaption/matrics_imagecaption_all_model_prediction_0.csv')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist direction-1 metrics alongside the direction-0 export.\n",
    "df_res1.to_csv('matrics/imagecaption/matrics_imagecaption_all_model_prediction_1.csv')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the direction-averaged metrics (the table displayed above).\n",
    "df_resavg.to_csv('matrics/imagecaption/matrics_imagecaption_all_model_prediction_avg.csv')"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
