{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Seed Python's global PRNG so any random sampling below is reproducible\n",
     "import random\n",
     "random.seed(10)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "def compute_presence_accuracy(ret_out):\n",
    "    correct = 0\n",
    "    for d in ret_out:\n",
    "        if d['ref'].strip() in d['output']:\n",
    "            correct += 1\n",
    "    return correct / len(ret_out)\n",
    "\n",
    "# Compute BLEU score\n",
    "from nltk.translate.bleu_score import sentence_bleu\n",
    "#convert string to list of words\n",
    "def str2list(s):\n",
    "    return s.split()\n",
    "\n",
    "#Take a reference and a candidate paragraph and compute the BLEU score.\n",
    "def computeBLEU(reference, candidate):\n",
    "    #convert candidate and reference to list of words\n",
    "    candidate = str2list(candidate)\n",
    "    reference = str2list(reference)\n",
    "    # compute BLEU score between candidate and reference use upto n-gram\n",
    "    # where n is minimum of number of words in reference and 4\n",
    "    n = min(len(reference), 4)\n",
    "    BLEUscore = sentence_bleu([reference], candidate, weights=[1./n]*n)\n",
    "    return BLEUscore\n",
    "\n",
    "def compute_BLEU(ret_out):\n",
    "    BLEUscores = []\n",
    "    for d in ret_out:\n",
    "        BLEUscores.append(computeBLEU(d['ref'], d['output']))\n",
    "    return sum(BLEUscores)/len(BLEUscores)\n",
    "\n",
    "# Find the first number in the string\n",
    "import re\n",
    "def find_first_num(s):\n",
    "    m = re.search(r'\\d+', s)\n",
    "    if m:\n",
    "        return m.group()\n",
    "    else:\n",
    "        return None"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
     "def get_metrics(FILE, INP_FILE=None, tok_limit=None):\n",
     "    \"\"\"Compute and print exact-match and BLEU metrics for a WikiQA run.\n",
     "\n",
     "    FILE: path to the json file generated by run_inference_WikiQA.py.\n",
     "    INP_FILE: input file for WikiQA; may be None when tok_limit is None.\n",
     "    tok_limit: if set, only compute metrics for inputs whose token length\n",
     "        (read from INP_FILE) is <= tok_limit.\n",
     "    All results are printed; nothing is returned.\n",
     "    \"\"\"\n",
     "\n",
     "    # Load the generated outputs\n",
     "    with open(FILE) as f:\n",
     "        ret_out = json.load(f)\n",
     "    \n",
     "    if tok_limit is not None:\n",
     "        # Load the matching input records to recover per-example token lengths\n",
     "        with open(INP_FILE) as f:\n",
     "            inp_data = json.load(f)\n",
     "            \n",
     "        print(len(ret_out), len(inp_data))\n",
     "        print(\"Old len of data: \" + str(len(ret_out)))\n",
     "        # Attach token lengths; assumes the two files are index-aligned, and\n",
     "        # the equality check below flags any input/value mismatch.\n",
     "        for idx in range(len(ret_out)):\n",
     "            ret_out[idx]['tok_len'] = inp_data[idx]['conversations'][0]['tok_len']\n",
     "            if(ret_out[idx]['input'] != inp_data[idx]['conversations'][0]['value']):\n",
     "                print(\"Error\")\n",
     "        ret_out = [d for d in ret_out if (d['tok_len']<=tok_limit)]\n",
     "        tok_len_stat = [d['tok_len'] for d in ret_out]\n",
     "        print(sum(tok_len_stat)/len(tok_len_stat),max(tok_len_stat))\n",
     "        print(\"New len of data: \" + str(len(ret_out)))\n",
     "    \n",
     "    # Each 'output' is a one-element list; unwrap it to the bare string\n",
     "    for i in range(len(ret_out)):\n",
     "        ret_out[i]['output'] = ret_out[i]['output'][0]\n",
     "    \n",
     "    # Convert ret_out into a pandas dataframe\n",
     "    import pandas as pd\n",
     "    df = pd.DataFrame(ret_out)\n",
     "\n",
     "    # Add a column 'question location' to df\n",
     "    # Every even row is \"end\" and every odd row is \"start\"\n",
     "    # (relies on the generation script emitting rows in this fixed\n",
     "    # interleaved order -- TODO confirm against run_inference_WikiQA.py)\n",
     "    df['question location'] = df.index % 2\n",
     "    # Change the value of 'question location' to \"start\" and \"end\"\n",
     "    df['question location'] = df['question location'].apply(lambda x: \"start\" if x == 1 else \"end\")\n",
     "\n",
     "    # Add a column 'answer location' to df\n",
     "    # Every 6 rows, first 2 have the value 'start', next 2 are 'random' and last 2 are 'end'\n",
     "    df['answer location'] = df.index % 6\n",
     "    # Change the value of 'answer location' to \"start\", \"random\" and \"end\"\n",
     "    df['answer location'] = df['answer location'].apply(lambda x: \"start\" if int(x/2) == 0 else \"random\" if int(x/2) == 1 else \"end\")\n",
     "\n",
     "    # Add a column for exact match accuracy in string\n",
     "    # (substring containment of the stripped reference, not a token-level match)\n",
     "    df['exact match'] = df.apply(lambda x: 1 if (x['ref'].strip() in x['output']) else 0, axis=1)\n",
     "    # Add a column for BLEU score\n",
     "    df['BLEU score'] = df.apply(lambda x: computeBLEU(x['ref'], x['output']), axis=1)\n",
     "\n",
     "    # print average exact match accuracy and average BLEU score\n",
     "    print(\"Average exact match accuracy: \", df['exact match'].mean())\n",
     "    print(\"Average BLEU score: \", df['BLEU score'].mean())\n",
     "\n",
     "    # Print average exact match accuracy and average BLEU score for each answer location\n",
     "    print(\"Average exact match accuracy for each answer location: \")\n",
     "    print(df.groupby('answer location')['exact match'].mean())\n",
     "    print(\"Average BLEU score for each answer location: \")\n",
     "    print(df.groupby('answer location')['BLEU score'].mean())\n",
     "\n",
     "    # Print average exact match accuracy and average BLEU score for each question location\n",
     "    print(\"Average exact match accuracy for each question location: \")\n",
     "    print(df.groupby('question location')['exact match'].mean())\n",
     "    print(\"Average BLEU score for each question location: \")\n",
     "    print(df.groupby('question location')['BLEU score'].mean())"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Compute metrics on 4k FFQA data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
     "# NOTE: get_metrics calls open(FILE) directly, so this cell raises until a real path replaces None\n",
     "FILE = None # Add path to the generated file here\n",
     "get_metrics(FILE)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "### Compute metrics on the same FFQA data, keeping only examples with input <= 4k tokens"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Some of the data in the file might cross 4k tokens (the mean is 4k, refer data statistics)\n",
     "# NOTE: get_metrics calls open(FILE) directly, so this cell raises until a real path replaces None\n",
     "FILE = None # Add path to the generated file here\n",
     "INP_FILE = '../../../datasets/WikiQA/Free_Form_QA/ffqa_4k.json'\n",
     "TOK_LIMIT = 4096\n",
     "GEN_SLACK = 256  # token budget reserved for the model's generated continuation\n",
     "\n",
     "# To ensure input+generated is within 4k, we limit the input by an additional 256 tokens\n",
     "get_metrics(FILE, INP_FILE, tok_limit = TOK_LIMIT - GEN_SLACK)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
