{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "sys.path.append('/home/lyc/TNTprojectz/KE/')\n",
    "from knb.utils.util import _prepare_requests\n",
    "import os\n",
    "import json"
   ]
  },
  {
   "cell_type": "raw",
   "metadata": {
    "vscode": {
     "languageId": "raw"
    }
   },
   "source": [
    "# pre_edit_path = '../../../pre_edit/Llama-2-7b-hf_counterfact_pre_edit_all.json'\n",
    "pre_edit_path = '../EasyEdit/pre_edit/Llama-2-7b-hf_counterfact_pre_edit_all.json'\n",
    "\n",
    "all_metrics = []\n",
    "with open(pre_edit_path, 'r') as f:\n",
    "    all_metrics = json.load(f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "from knb.utils.knowedit import KnowEditDataset\n",
    "\n",
    "data_dir = '../EasyEdit/dataset/KnowEdit-ms/benchmark_wiki_counterfact_test_cf.json'\n",
    "datas = KnowEditDataset(data_dir)\n",
    "\n",
    "prompts = [data['prompt'] for data in datas]\n",
    "subjects = [data['subject'] for data in datas]\n",
    "target_new = [data['target_new'] for data in datas]\n",
    "ground_truth = [data['ground_truth'] for data in datas]\n",
    "\n",
    "\n",
    "def collect_prompts_answers(records):\n",
    "    \"\"\"Flatten per-case lists of {'prompt', 'ground_truth'} dicts into\n",
    "    parallel prompt/answer lists.\n",
    "\n",
    "    Keeps a None placeholder for cases without data and skips pairs whose\n",
    "    ground truth is empty. Replaces the previous duplicated inline loops\n",
    "    for portability and locality with one shared implementation.\n",
    "    \"\"\"\n",
    "    all_prompts, all_answers = [], []\n",
    "    for item in records:\n",
    "        if item is None:\n",
    "            all_prompts.append(None)\n",
    "            all_answers.append(None)\n",
    "            continue\n",
    "        case_prompts, case_answers = [], []\n",
    "        for pr in item:\n",
    "            an = pr[\"ground_truth\"]\n",
    "            # ground_truth may be arbitrarily nested lists; unwrap to the\n",
    "            # first scalar, treating an empty list as an empty string.\n",
    "            while isinstance(an, list):\n",
    "                an = an[0] if an else ''\n",
    "            if an.strip() == \"\":\n",
    "                continue  # no usable answer -> drop this prompt/answer pair\n",
    "            case_prompts.append(pr[\"prompt\"])\n",
    "            case_answers.append(an)\n",
    "        all_prompts.append(case_prompts)\n",
    "        all_answers.append(case_answers)\n",
    "    return all_prompts, all_answers\n",
    "\n",
    "\n",
    "portability_r = [data['portability_r'] for data in datas]\n",
    "portability_s = [data['portability_s'] for data in datas]\n",
    "portability_l = [data['portability_l'] for data in datas]\n",
    "\n",
    "portability_reasoning_prompts, portability_reasoning_ans = collect_prompts_answers(portability_r)\n",
    "portability_Subject_Aliasing_prompts, portability_Subject_Aliasing_ans = collect_prompts_answers(portability_s)\n",
    "portability_Logical_Generalization_prompts, portability_Logical_Generalization_ans = collect_prompts_answers(portability_l)\n",
    "assert len(prompts) == len(portability_reasoning_prompts) == len(portability_Logical_Generalization_prompts) == len(portability_Subject_Aliasing_prompts)\n",
    "\n",
    "locality_rs = [data['locality_rs'] for data in datas]\n",
    "locality_f = [data['locality_f'] for data in datas]\n",
    "\n",
    "locality_Relation_Specificity_prompts, locality_Relation_Specificity_ans = collect_prompts_answers(locality_rs)\n",
    "locality_Forgetfulness_prompts, locality_Forgetfulness_ans = collect_prompts_answers(locality_f)\n",
    "assert len(prompts) == len(locality_Relation_Specificity_prompts) == len(locality_Forgetfulness_prompts)\n",
    "\n",
    "locality_inputs = {\n",
    "    'Relation_Specificity': {\n",
    "        'prompt': locality_Relation_Specificity_prompts,\n",
    "        'ground_truth': locality_Relation_Specificity_ans\n",
    "    },\n",
    "    'Forgetfulness': {\n",
    "        'prompt': locality_Forgetfulness_prompts,\n",
    "        'ground_truth': locality_Forgetfulness_ans\n",
    "    }\n",
    "}\n",
    "portability_inputs = {\n",
    "    'Subject_Aliasing': {\n",
    "        'prompt': portability_Subject_Aliasing_prompts,\n",
    "        'ground_truth': portability_Subject_Aliasing_ans\n",
    "    },\n",
    "    'reasoning': {\n",
    "        'prompt': portability_reasoning_prompts,\n",
    "        'ground_truth': portability_reasoning_ans\n",
    "    },\n",
    "    'Logical_Generalization': {\n",
    "        'prompt': portability_Logical_Generalization_prompts,\n",
    "        'ground_truth': portability_Logical_Generalization_ans\n",
    "    }\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Assemble edit requests: each pairs a prompt with its new target and the\n",
    "# locality/portability probes. NOTE(review): the 4th positional arg is None —\n",
    "# presumably rephrase prompts or subjects; confirm against _prepare_requests.\n",
    "requests = _prepare_requests(prompts, target_new, ground_truth, None, locality_inputs, portability_inputs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/json": {
       "ascii": false,
       "bar_format": null,
       "colour": null,
       "elapsed": 0.007828950881958008,
       "initial": 0,
       "n": 0,
       "ncols": null,
       "nrows": null,
       "postfix": null,
       "prefix": "Loading checkpoint shards",
       "rate": null,
       "total": 2,
       "unit": "it",
       "unit_divisor": 1000,
       "unit_scale": false
      },
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "a3ed06cfa8944836a0fff82671c01b1e",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Loading checkpoint shards:   0%|          | 0/2 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/lyc/miniconda3/envs/ke2torch23cu121/lib/python3.9/site-packages/transformers/generation/configuration_utils.py:492: UserWarning: `do_sample` is set to `False`. However, `temperature` is set to `0.9` -- this flag is only used in sample-based generation modes. You should set `do_sample=True` or unset `temperature`. This was detected when initializing the generation config instance, which means the corresponding file may hold incorrect parameterization and should be fixed.\n",
      "  warnings.warn(\n",
      "/home/lyc/miniconda3/envs/ke2torch23cu121/lib/python3.9/site-packages/transformers/generation/configuration_utils.py:497: UserWarning: `do_sample` is set to `False`. However, `top_p` is set to `0.6` -- this flag is only used in sample-based generation modes. You should set `do_sample=True` or unset `top_p`. This was detected when initializing the generation config instance, which means the corresponding file may hold incorrect parameterization and should be fixed.\n",
      "  warnings.warn(\n",
      "/home/lyc/miniconda3/envs/ke2torch23cu121/lib/python3.9/site-packages/transformers/generation/configuration_utils.py:492: UserWarning: `do_sample` is set to `False`. However, `temperature` is set to `0.9` -- this flag is only used in sample-based generation modes. You should set `do_sample=True` or unset `temperature`.\n",
      "  warnings.warn(\n",
      "/home/lyc/miniconda3/envs/ke2torch23cu121/lib/python3.9/site-packages/transformers/generation/configuration_utils.py:497: UserWarning: `do_sample` is set to `False`. However, `top_p` is set to `0.6` -- this flag is only used in sample-based generation modes. You should set `do_sample=True` or unset `top_p`.\n",
      "  warnings.warn(\n"
     ]
    }
   ],
   "source": [
    "# CUDA_VISIBLE_DEVICES must be set before torch initializes CUDA, so set it\n",
    "# before importing transformers/torch in this session.\n",
    "os.environ['CUDA_VISIBLE_DEVICES'] = '2'\n",
    "\n",
    "from transformers import AutoTokenizer, AutoModelForCausalLM\n",
    "import torch\n",
    "\n",
    "model_name = \"Llama-2-7b-ms\"\n",
    "device = \"auto\"\n",
    "huggingface_path = '/share/huggingface/'\n",
    "model = AutoModelForCausalLM.from_pretrained(huggingface_path + model_name, torch_dtype=torch.bfloat16, device_map=device)\n",
    "# Tokenizers are CPU-side objects; device_map is not a tokenizer argument.\n",
    "tokenizer = AutoTokenizer.from_pretrained(huggingface_path + model_name)\n",
    "\n",
    "# Path to the trained KNB adapter checkpoint used in later cells.\n",
    "model_id = '../EasyEdit/knb_edit/checkpoint/counterfact/20_KNB_counterfact_all_Llama-2-7b-hf_max_99.85_80_down_proj'\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "from knb.utils.evaluate import compute_edit_quality\n",
    "from knb.utils.hparams import KNBHyperParams\n",
    "from tqdm import tqdm\n",
    "\n",
    "# Load KNB editing hyperparameters (includes hparams.device used below).\n",
    "hparams = KNBHyperParams.from_hparams('../EasyEdit/hparams/KNB/Llama-2-7b-ms.yaml')"
   ]
  },
  {
   "cell_type": "raw",
   "metadata": {
    "vscode": {
     "languageId": "raw"
    }
   },
   "source": [
    "# eval_metric = 'exact match' # v1\n",
    "eval_metric = 'token_em' # v2\n",
    "test_generation = True\n",
    "all_metrics = []\n",
    "\n",
    "for i, request in tqdm(enumerate(requests), total=len(requests)):\n",
     "    pre = compute_edit_quality(model, model_name, hparams, tokenizer, request, hparams.device, eval_metric=eval_metric, test_generation=test_generation)\n",
    "    all_metrics.append({\n",
    "        \"case_id\": i,\n",
    "        \"request\": request,\n",
    "        \"pre\": pre,\n",
    "        \"time\": 0,\n",
    "    })"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load precomputed pre-edit metrics (v1) from disk.\n",
    "# NOTE(review): this overwrites any `all_metrics` built earlier in the session.\n",
    "pre_edit_path = '../EasyEdit/pre_edit/Llama-2-7b-hf_counterfact_pre_edit_all_v1.json'\n",
    "with open(pre_edit_path, 'r') as f:\n",
    "    all_metrics = json.load(f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "from knb.peft import PeftModelForCausalLM\n",
    "\n",
    "# Attach the trained KNB adapter checkpoint (model_id) to the base model.\n",
    "# is_trainable=False: load for inference/evaluation only.\n",
    "peft_model = PeftModelForCausalLM.from_pretrained(\n",
    "    model,\n",
    "    model_id=model_id,\n",
    "    is_trainable=False,\n",
    "    device_map=device,\n",
    "    torch_dtype=torch.bfloat16,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(True, False)"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sanity check after merging the adapter into the base weights: the recorded\n",
    "# output (True, False) shows `edited_model` is the same object as `model`,\n",
    "# while `peft_model` remains the distinct wrapper object.\n",
    "edited_model = peft_model.merge_and_unload()\n",
    "model == edited_model, model == peft_model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 885/885 [56:06<00:00,  3.80s/it]\n"
     ]
    }
   ],
   "source": [
    "# Evaluate every request on the adapter-wrapped (edited) model and attach\n",
    "# the result to the matching pre-edit record.\n",
    "for i, request in tqdm(enumerate(requests), total=len(requests)):\n",
    "    # No trailing comma here: the previous trailing comma wrapped the\n",
    "    # metrics dict in a 1-tuple before it was stored under 'post'.\n",
    "    post = compute_edit_quality(peft_model, model_name, hparams, tokenizer, request, hparams.device, test_generation=True)\n",
    "    all_metrics[i]['post'] = post"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist combined pre/post metrics, named after the adapter checkpoint.\n",
    "# Create the output directory first so open() cannot fail on a fresh clone.\n",
    "os.makedirs('./metrics', exist_ok=True)\n",
    "with open(f'./metrics/{model_id.split(\"/\")[-1]}-2.json', 'w') as f:\n",
    "    json.dump(all_metrics, f, indent=4)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "ke2torch23cu121",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.19"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
