{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "14360485",
   "metadata": {},
   "source": [
    "#### Set environment variables in [.env](.env) for LLM API calling"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6bd95c11",
   "metadata": {},
   "source": [
    "### Import Dependencies"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f1fb3d81-16b6-4b8c-a028-880fdce5e14a",
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "sys.path.insert(0, \"../../\")\n",
    "import os\n",
    "import promptwizard\n",
    "from promptwizard.glue.promptopt.instantiate import GluePromptOpt\n",
    "from promptwizard.glue.promptopt.techniques.common_logic import DatasetSpecificProcessing\n",
    "from promptwizard.glue.common.utils.file import save_jsonlist\n",
    "from typing import Any\n",
    "from tqdm import tqdm\n",
    "import json\n",
    "from datasets import load_dataset\n",
    "\n",
    "from dotenv import load_dotenv\n",
    "load_dotenv(override = True)\n"
   ]
  },
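  {
   "cell_type": "markdown",
   "id": "3e9a1c20",
   "metadata": {},
   "source": [
    "Optionally, confirm that a credential was actually loaded from [.env](.env). `OPENAI_API_KEY` is only an illustrative variable name here; check whichever variable your LLM config expects."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7d2f4b9e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional sanity check that a credential was loaded from .env.\n",
    "# OPENAI_API_KEY is an illustrative name; use the variable your LLM config expects.\n",
    "if os.getenv(\"OPENAI_API_KEY\") is None:\n",
    "    print(\"Warning: OPENAI_API_KEY is not set; check your .env file.\")"
   ]
  },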
  {
   "cell_type": "markdown",
   "id": "f061d2fd",
   "metadata": {},
   "source": [
    "### Create a dataset specific class and define the required functions "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5f325d33",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "def extract_between(start, end, text):\n",
    "    \"\"\"\n",
    "    Extracts the substring from 'text' that is between 'start' and 'end' strings.\n",
    "    \n",
    "    Parameters:\n",
    "    - start (str): The starting delimiter string.\n",
    "    - end (str): The ending delimiter string.\n",
    "    - text (str): The text to search within.\n",
    "    \n",
    "    Returns:\n",
    "    - str: The extracted substring between the start and end delimiters.\n",
    "    \"\"\"\n",
    "    start_index = text.find(start)\n",
    "    if start_index == -1:\n",
    "        return '' \n",
    "    \n",
    "    start_index += len(start)\n",
    "    \n",
    "    end_index = text.find(end, start_index)\n",
    "    if end_index == -1:\n",
    "        return ''  \n",
    "    return text[start_index:end_index]\n",
    "\n",
    "class SVAMP(DatasetSpecificProcessing):\n",
    "\n",
    "    def dataset_to_jsonl(self, dataset_jsonl: str, **kwargs: Any) -> None:\n",
    "        def extract_answer_from_output(completion):\n",
    "\n",
    "                return completion\n",
    "\n",
    "        examples_set = []\n",
    "\n",
    "        for _, sample in tqdm(enumerate(kwargs[\"dataset\"]), desc=\"Evaluating samples\"):\n",
    "            example = {\n",
    "              DatasetSpecificProcessing.QUESTION_LITERAL: sample['question'],\n",
    "              DatasetSpecificProcessing.ANSWER_WITH_REASON_LITERAL: sample['answer'],\n",
    "              DatasetSpecificProcessing.FINAL_ANSWER_LITERAL: extract_answer_from_output(sample[\"answer\"])\n",
    "            }\n",
    "            examples_set.append(example)\n",
    "\n",
    "        save_jsonlist(dataset_jsonl, examples_set, \"w\")\n",
    "\n",
    "    def extract_final_answer(self, answer: str):\n",
    "        \n",
    "        final_answer = extract_between(text=answer,start=\"<ANS_START>\",end=\"<ANS_END>\")\n",
    "        return final_answer\n",
    "    \n",
    "    def access_answer(self, llm_output: str, gt_answer: str):\n",
    "\n",
    "        predicted_answer = self.extract_final_answer(llm_output)\n",
    "        is_correct = False\n",
    "        if predicted_answer and (predicted_answer.lower() == gt_answer.lower()):\n",
    "            is_correct = True\n",
    "\n",
    "        return is_correct, predicted_answer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f384eb57",
   "metadata": {},
   "outputs": [],
   "source": [
    "svamp_processor = SVAMP()"
   ]
  },
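  {
   "cell_type": "markdown",
   "id": "5c8e2a17",
   "metadata": {},
   "source": [
    "Optional sanity check: the class assumes the model wraps its final answer in `<ANS_START>`/`<ANS_END>` delimiters. The toy LLM output below is made up purely to illustrate the extraction and comparison logic."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9b1d6f42",
   "metadata": {},
   "outputs": [],
   "source": [
    "# A made-up model output to exercise the delimiter-based answer extraction.\n",
    "toy_output = \"Reasoning: 3 + 4 = 7. <ANS_START>7<ANS_END>\"\n",
    "\n",
    "is_correct, predicted = svamp_processor.access_answer(toy_output, gt_answer=\"7\")\n",
    "print(f\"predicted={predicted!r}, correct={is_correct}\")  # expected: predicted='7', correct=True"
   ]
  },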
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "976681bd-4f43-4dbc-947e-cdb94d4824f0",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "if not os.path.exists(\"data\"):\n",
    "    os.mkdir(\"data\")\n",
    "\n",
    "dataset = load_dataset(\"ChilleD/SVAMP\")\n",
    "\n",
    "for dataset_type in ['train','test']:\n",
    "    data_list = []\n",
    "    num_samples = 0\n",
    "    for data in dataset[dataset_type]:\n",
    "        data_list.append({\"question\": data['question_concat'], \"answer\": data['Answer']})\n",
    "        if dataset_type == 'train' and num_samples == 100: # We sample only 100 train examples and use 25 out them for training randomly\n",
    "            break\n",
    "        num_samples += 1\n",
    "    svamp_processor.dataset_to_jsonl(\"data/\"+ dataset_type+'.jsonl', dataset=data_list)"
   ]
  },
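  {
   "cell_type": "markdown",
   "id": "2a7c3e85",
   "metadata": {},
   "source": [
    "Optionally, inspect the first record of the generated training file to confirm it was written as expected."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6f4b8d21",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional: peek at the first record of the generated training file.\n",
    "with open(os.path.join(\"data\", \"train.jsonl\")) as f:\n",
    "    print(json.dumps(json.loads(f.readline()), indent=2))"
   ]
  },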
  {
   "cell_type": "markdown",
   "id": "4852b94b",
   "metadata": {},
   "source": [
    "### Set paths"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "id": "f43482f1-3e10-4cf7-8ea6-ff42c04067a6",
   "metadata": {},
   "outputs": [],
   "source": [
    "train_file_name = os.path.join(\"data\", \"train.jsonl\")\n",
    "test_file_name = os.path.join(\"data\", \"test.jsonl\")\n",
    "path_to_config = \"configs\"\n",
    "llm_config_path = os.path.join(path_to_config, \"llm_config.yaml\")\n",
    "promptopt_config_path = os.path.join(path_to_config, \"promptopt_config.yaml\")\n",
    "setup_config_path = os.path.join(path_to_config, \"setup_config.yaml\")"
   ]
  },
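  {
   "cell_type": "markdown",
   "id": "8e5a9c37",
   "metadata": {},
   "source": [
    "A small optional guard: fail fast with a clear message if any expected config file is missing, rather than erroring deep inside `GluePromptOpt`."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1c6d2b74",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional guard: verify the config files exist before instantiating GluePromptOpt.\n",
    "for config_path in [llm_config_path, promptopt_config_path, setup_config_path]:\n",
    "    if not os.path.exists(config_path):\n",
    "        print(f\"Missing config file: {config_path}\")"
   ]
  },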
  {
   "cell_type": "markdown",
   "id": "f7ba6394",
   "metadata": {},
   "source": [
    "### Create an object for calling prompt optimization and inference functionalities"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8af4246f-db32-4b37-a73a-f9e2e5125d09",
   "metadata": {},
   "outputs": [],
   "source": [
    "gp = GluePromptOpt(promptopt_config_path,\n",
    "                   setup_config_path,\n",
    "                   train_file_name,\n",
    "                   svamp_processor)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6e38ea08",
   "metadata": {},
   "source": [
    "### Call prompt optmization function\n",
    "1. ```use_examples``` can be used when there are training samples and a mixture of real and synthetic in-context examples are required in the final prompt. When set to ```False``` all the in-context examples will be real\n",
    "2. ```generate_synthetic_examples``` can be used when there are no training samples and we want to generate synthetic examples \n",
    "3. ```run_without_train_examples``` can be used when there are no training samples and in-context examples are not required in the final prompt "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "573c6151-2c03-45d9-9904-1724a1e20f1b",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Function call to generate optimal prompt and expert profile \n",
    "best_prompt, expert_profile = gp.get_best_prompt(use_examples=True,run_without_train_examples=False,generate_synthetic_examples=False)"
   ]
  },
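  {
   "cell_type": "markdown",
   "id": "4a9f7e03",
   "metadata": {},
   "source": [
    "For reference, the two alternative zero-data modes described above would be invoked as sketched below (commented out; run only one mode per optimization pass). The flag combinations follow the descriptions in the previous markdown cell."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0d3b5c68",
   "metadata": {},
   "outputs": [],
   "source": [
    "# No training samples and no in-context examples in the final prompt:\n",
    "# best_prompt, expert_profile = gp.get_best_prompt(use_examples=False,\n",
    "#                                                  run_without_train_examples=True,\n",
    "#                                                  generate_synthetic_examples=False)\n",
    "\n",
    "# No training samples; generate synthetic in-context examples instead:\n",
    "# best_prompt, expert_profile = gp.get_best_prompt(use_examples=False,\n",
    "#                                                  run_without_train_examples=False,\n",
    "#                                                  generate_synthetic_examples=True)"
   ]
  },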
  {
   "cell_type": "markdown",
   "id": "bae1a791",
   "metadata": {},
   "source": [
    "### Save the optimized prompt and expert profile"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "34a716af-0d77-4c7d-b1c2-6438d66096ce",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "import pickle \n",
    "\n",
    "if not os.path.exists(\"results\"):\n",
    "    os.system(\"mkdir results\")\n",
    "\n",
    "with open(\"results/best_prompt.pkl\", 'wb') as f:\n",
    "    pickle.dump(best_prompt, f)\n",
    "with open(\"results/expert_profile.pkl\", 'wb') as f:\n",
    "    pickle.dump(expert_profile, f)\n",
    "\n",
    "print(f\"Best prompt: {best_prompt} \\nExpert profile: {expert_profile}\")"
   ]
  },
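  {
   "cell_type": "markdown",
   "id": "b2e8f1a4",
   "metadata": {},
   "source": [
    "The pickled artifacts can be reloaded later, e.g. in a fresh session, as shown below."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d7c0a3f5",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reload the saved prompt and expert profile in a later session.\n",
    "import pickle\n",
    "\n",
    "with open(\"results/best_prompt.pkl\", \"rb\") as f:\n",
    "    best_prompt = pickle.load(f)\n",
    "with open(\"results/expert_profile.pkl\", \"rb\") as f:\n",
    "    expert_profile = pickle.load(f)"
   ]
  },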
  {
   "cell_type": "markdown",
   "id": "b7691a87",
   "metadata": {},
   "source": [
    "### Evaluate the optimized prompt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c49b5711-82dd-4d18-8cd4-ee447cf8d74c",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "gp.EXPERT_PROFILE = expert_profile\n",
    "gp.BEST_PROMPT = best_prompt\n",
    "\n",
    "# Function call to evaluate the prompt\n",
    "accuracy = gp.evaluate(test_file_name)\n",
    "\n",
    "print(f\"Final Accuracy: {accuracy}\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
