{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "46ab2f0f",
   "metadata": {},
   "source": [
    "# EasyEdit Example with **ROME** on llama-7b\n",
     "Tutorial author: Yu Zhang (echo_zy@std.uestc.edu.cn). In this tutorial, we use ROME to edit the llama-7b model. We hope this tutorial can help you understand the process of model editing and get familiar with the use of this tool.\n",
    "\n",
    "This tutorial uses Python3."
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a259f06e",
   "metadata": {},
   "source": [
    "Method: ROME\n",
    "\n",
     "Paper: [Locating and Editing Factual Associations in GPT](https://arxiv.org/abs/2202.05262)\n",
     "![rome.png](rome.png)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5b839033",
   "metadata": {},
   "source": [
    "## Prepare the runtime environment"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "a1b7da88",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/home/wmr/EasyEdit\n",
      "data\t    figs\t hugging_cache\tREADME.md\t  tutorial-notebooks\r\n",
      "easyeditor  globals.yml  LICENSE\trequirements.txt\r\n",
      "edit.py     hparams\t logs\t\tresults\r\n"
     ]
    }
   ],
   "source": [
    "# !git clone https://github.com/zjunlp/EasyEdit\n",
    "%cd EasyEdit\n",
    "!ls"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "44f3eac3",
   "metadata": {},
   "outputs": [],
   "source": [
    "!apt-get install python3.9\n",
    "!sudo update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1\n",
    "!sudo update-alternatives --config python3\n",
    "!apt-get install python3-pip\n",
    "%pip install -r requirements.txt"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4135a608",
   "metadata": {},
   "source": [
    "## Config Method Parameters"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5912a228",
   "metadata": {},
   "source": [
    "\n",
    "\n",
    "```python\n",
    "# For ROME hparams:\n",
    "\n",
    "alg_name: \"ROME\"\n",
    "model_name: \"./hugging_cache/llama-7b\"\n",
    "device: 0\n",
    "layers: [5]\n",
    "clamp_norm_factor: 0.75\n",
    "layer_selection: \"all\"\n",
    "fact_token: \"subject_last\"\n",
    "v_num_grad_steps: 20\n",
    "v_lr: 5e-1\n",
     "v_loss_layer: 31\n",
    "v_weight_decay: 0.5\n",
    "kl_factor: 0.0625\n",
    "mom2_adjustment: true\n",
    "mom2_update_weight: 20000\n",
     "rewrite_module_tmp: \"model.layers.{}.mlp.down_proj\"\n",
     "layer_module_tmp: \"model.layers.{}\"\n",
     "mlp_module_tmp: \"model.layers.{}.mlp\"\n",
     "attn_module_tmp: \"model.layers.{}.self_attn\"\n",
     "ln_f_module: \"model.norm\"\n",
     "lm_head_module: \"lm_head\"\n",
    "mom2_dataset: \"wikipedia\"\n",
    "mom2_n_samples: 100000\n",
    "mom2_dtype: \"float32\"\n",
    "```\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3b2181cd",
   "metadata": {},
   "source": [
    "## Import modules & Run"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3d1f9557",
   "metadata": {},
   "source": [
    "### Edit llama-7b"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "818879db",
   "metadata": {},
   "outputs": [],
   "source": [
    "from easyeditor import BaseEditor\n",
    "from easyeditor import ROMEHyperParams\n",
    "import os\n",
    "# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"2\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "f12ea423",
   "metadata": {},
   "outputs": [],
   "source": [
    "hparams=ROMEHyperParams.from_hparams('./hparams/ROME/llama-7b.yaml')\n",
    "# prompts = ['Ray Charles, the',\n",
    "#             'Grant Hill is a professional',\n",
    "#             'The law in Ikaalinen declares the language'\n",
    "#             ]\n",
    "# ground_truth = ['piano',\n",
    "#                 'basketball',\n",
    "#                 'Finnish'\n",
    "#                 ]\n",
    "# target_new = ['violin',\n",
    "#               'soccer',\n",
    "#               'Swedish'\n",
    "#               ]\n",
    "# subject = ['Ray Charles',\n",
    "#             'Grant Hill',\n",
    "#             'Ikaalinen'\n",
    "#             ]\n",
    "\n",
    "prompts = ['Who was the designer of Lahti Town Hall?',\n",
    "                'What role does Denny Herzig play in football?',\n",
    "                'What city did Marl Young live when he died?']\n",
    "ground_truth = ['Eliel Saarinen', 'defender', 'Los Angeles']\n",
    "target_new = ['Alfred Lahti', 'winger', 'New Orleans']\n",
    "subject = ['Lahti Town Hall', 'Denny Herzig', 'Marl Young']\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "d212da59",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2023-07-24 15:18:01,936 - easyeditor.editors.editor - INFO - Instantiating model\n",
      "07/24/2023 15:18:01 - INFO - easyeditor.editors.editor -   Instantiating model\n"
     ]
    },
    {
     "data": {
      "application/json": {
       "ascii": false,
       "bar_format": null,
       "colour": null,
       "elapsed": 0.005722999572753906,
       "initial": 0,
       "n": 0,
       "ncols": null,
       "nrows": 8,
       "postfix": null,
       "prefix": "Loading checkpoint shards",
       "rate": null,
       "total": 33,
       "unit": "it",
       "unit_divisor": 1000,
       "unit_scale": false
      },
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "3bf1c79829bc4996af18f9fbad4994b3",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Loading checkpoint shards:   0%|          | 0/33 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "normalizer.cc(51) LOG(INFO) precompiled_charsmap is empty. use identity normalization.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Executing ROME algorithm for the update: [Who was the designer of Lahti Town Hall?] -> [Alfred Lahti]\n",
      "Cached context templates ['{}', 'The 1st. {}', 'The Cure\\n. {}', 'Therefore Therefore Therefore Therefore. {}', 'Therefore Therefore Therefore Therefore. {}', 'Because a lot of. {}', 'Because a lot of. {}', \"I'm going. {}\", 'I can.\\n. {}', 'You You\\n1. {}', 'You You You You. {}', 'The 2008-20. {}', 'The Cutting Edge: A Cut. {}', 'Therefore Therefore Therefore Therefore Therefore Therefore Therefore Therefore Therefore. {}', 'Therefore Therefore Therefore Therefore ## Glej. {}', 'Because of the fact that the majority of the. {}', 'Because2020-01-. {}', 'I’m not a huge fan of the. {}', 'I can’t wait to try the new. {}', 'You You You You.\\nThe 1. {}', 'You are the most important person in my life. {}']\n",
      "Computing left vector (u)...\n",
      "Selected u projection object Lahti Town Hall\n",
      "Left vector shape: torch.Size([11008])\n",
      "Computing right vector (v)\n",
      "Lookup index found: 10 | Sentence: Who was the designer of Lahti Town Hall?<unk>Alfred Laht | Token: Hall\n",
      "Rewrite layer is 5\n",
      "Tying optimization objective to 31\n",
      "Recording initial value of v*\n",
      "loss 8.879 = 8.879 + 0.0 + 0.0 avg prob of [Alfred Lahti] 0.000142173477797769\n",
      "loss 7.917 = 7.602 + 0.035 + 0.28 avg prob of [Alfred Lahti] 0.0005167973577044904\n",
      "loss 7.248 = 6.933 + 0.035 + 0.28 avg prob of [Alfred Lahti] 0.0009800774278119206\n",
      "loss 7.012 = 6.706 + 0.025 + 0.28 avg prob of [Alfred Lahti] 0.0012297938810661435\n",
      "loss 6.742 = 6.445 + 0.017 + 0.28 avg prob of [Alfred Lahti] 0.0015934589318931103\n",
      "loss 6.572 = 6.279 + 0.012 + 0.28 avg prob of [Alfred Lahti] 0.0018824427388608456\n",
      "loss 6.375 = 6.083 + 0.011 + 0.28 avg prob of [Alfred Lahti] 0.0022956428583711386\n",
      "loss 6.092 = 5.8 + 0.011 + 0.28 avg prob of [Alfred Lahti] 0.003050305414944887\n",
      "loss 5.65 = 5.355 + 0.015 + 0.28 avg prob of [Alfred Lahti] 0.0047575500793755054\n",
      "loss 4.948 = 4.647 + 0.021 + 0.28 avg prob of [Alfred Lahti] 0.009683612734079361\n",
      "loss 4.309 = 3.98 + 0.048 + 0.28 avg prob of [Alfred Lahti] 0.01878233440220356\n",
      "loss 4.033 = 3.728 + 0.025 + 0.28 avg prob of [Alfred Lahti] 0.024095706641674042\n",
      "loss 3.842 = 3.538 + 0.024 + 0.28 avg prob of [Alfred Lahti] 0.029125066474080086\n",
      "loss 3.775 = 3.475 + 0.019 + 0.28 avg prob of [Alfred Lahti] 0.031932488083839417\n",
      "loss 4.73 = 4.429 + 0.02 + 0.28 avg prob of [Alfred Lahti] 0.012222840450704098\n",
      "loss 4.301 = 4.004 + 0.017 + 0.28 avg prob of [Alfred Lahti] 0.018440837040543556\n",
      "loss 4.106 = 3.808 + 0.018 + 0.28 avg prob of [Alfred Lahti] 0.022285940125584602\n",
      "loss 3.829 = 3.525 + 0.024 + 0.28 avg prob of [Alfred Lahti] 0.029495015740394592\n",
      "loss 3.561 = 3.235 + 0.045 + 0.28 avg prob of [Alfred Lahti] 0.0394318550825119\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2023-07-24 15:19:07,716 - easyeditor.editors.editor - INFO - Execution 0 editing took 4.404031991958618\n",
      "07/24/2023 15:19:07 - INFO - easyeditor.editors.editor -   Execution 0 editing took 4.404031991958618\n",
      "2023-07-24 15:19:07,781 - easyeditor.editors.editor - INFO - Evaluation took 0.06384968757629395\n",
      "07/24/2023 15:19:07 - INFO - easyeditor.editors.editor -   Evaluation took 0.06384968757629395\n",
      "2023-07-24 15:19:07,783 - easyeditor.editors.editor - INFO - 0 editing: Who was the designer of Lahti Town Hall? -> Alfred Lahti  \n",
      " {'case_id': 0, 'time': 4.404031991958618, 'post': {'rewrite_acc': 0.75, 'locality': {}, 'portability': {}}, 'pre': {'rewrite_acc': 0.0, 'portability': {}}}\n",
      "07/24/2023 15:19:07 - INFO - easyeditor.editors.editor -   0 editing: Who was the designer of Lahti Town Hall? -> Alfred Lahti  \n",
      " {'case_id': 0, 'time': 4.404031991958618, 'post': {'rewrite_acc': 0.75, 'locality': {}, 'portability': {}}, 'pre': {'rewrite_acc': 0.0, 'portability': {}}}\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loss 3.143 = 2.833 + 0.029 + 0.28 avg prob of [Alfred Lahti] 0.05895619094371796\n",
      "Delta norm: 28.535524368286133\n",
      "Change in target norm: 7.133820056915283 to 29.089534759521484 => 21.95571517944336\n",
      "Division Factor: 4.932310581207275\n",
      "Right vector norm: 5.785427093505859\n",
      "Right vector shape: torch.Size([4096])\n",
      "Deltas successfully computed for ['model.layers.5.mlp.down_proj.weight']\n",
      "New weights successfully inserted into ['model.layers.5.mlp.down_proj.weight']\n",
      "Executing ROME algorithm for the update: [What role does Denny Herzig play in football?] -> [winger]\n",
      "Computing left vector (u)...\n",
      "Selected u projection object Denny Herzig\n",
      "Left vector shape: torch.Size([11008])\n",
      "Computing right vector (v)\n",
      "Lookup index found: 7 | Sentence: What role does Denny Herzig play in football?<unk>w | Token: zig\n",
      "Rewrite layer is 5\n",
      "Tying optimization objective to 31\n",
      "Recording initial value of v*\n",
      "loss 12.911 = 12.911 + 0.0 + 0.0 avg prob of [winger] 2.5559202185831964e-06\n",
      "loss 12.334 = 11.919 + 0.049 + 0.366 avg prob of [winger] 6.802848020015517e-06\n",
      "loss 10.555 = 10.169 + 0.019 + 0.366 avg prob of [winger] 4.009234544355422e-05\n",
      "loss 9.508 = 9.117 + 0.025 + 0.366 avg prob of [winger] 0.00011367961997166276\n",
      "loss 8.83 = 8.442 + 0.022 + 0.366 avg prob of [winger] 0.00023605319438502192\n",
      "loss 7.542 = 7.149 + 0.027 + 0.366 avg prob of [winger] 0.0007933650049380958\n",
      "loss 6.461 = 6.051 + 0.043 + 0.366 avg prob of [winger] 0.0023743233177810907\n",
      "loss 5.666 = 5.244 + 0.055 + 0.366 avg prob of [winger] 0.005360547453165054\n",
      "loss 4.919 = 4.506 + 0.046 + 0.366 avg prob of [winger] 0.011172305792570114\n",
      "loss 4.253 = 3.846 + 0.04 + 0.366 avg prob of [winger] 0.02150503545999527\n",
      "loss 3.451 = 3.051 + 0.034 + 0.366 avg prob of [winger] 0.04768233746290207\n",
      "loss 3.21 = 2.819 + 0.024 + 0.366 avg prob of [winger] 0.060178473591804504\n",
      "loss 3.581 = 3.185 + 0.029 + 0.366 avg prob of [winger] 0.04689909145236015\n",
      "loss 3.956 = 3.559 + 0.03 + 0.366 avg prob of [winger] 0.02883644960820675\n",
      "loss 3.554 = 3.162 + 0.026 + 0.366 avg prob of [winger] 0.0432780459523201\n",
      "loss 3.153 = 2.741 + 0.046 + 0.366 avg prob of [winger] 0.06508566439151764\n",
      "loss 2.759 = 2.359 + 0.033 + 0.366 avg prob of [winger] 0.0946630910038948\n",
      "loss 2.452 = 2.052 + 0.034 + 0.366 avg prob of [winger] 0.12857690453529358\n",
      "loss 2.312 = 1.916 + 0.03 + 0.366 avg prob of [winger] 0.14736822247505188\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2023-07-24 15:19:11,695 - easyeditor.editors.editor - INFO - Execution 1 editing took 3.9102234840393066\n",
      "07/24/2023 15:19:11 - INFO - easyeditor.editors.editor -   Execution 1 editing took 3.9102234840393066\n",
      "2023-07-24 15:19:11,757 - easyeditor.editors.editor - INFO - Evaluation took 0.06087899208068848\n",
      "07/24/2023 15:19:11 - INFO - easyeditor.editors.editor -   Evaluation took 0.06087899208068848\n",
      "2023-07-24 15:19:11,759 - easyeditor.editors.editor - INFO - 1 editing: What role does Denny Herzig play in football? -> winger  \n",
      " {'case_id': 1, 'time': 3.9102234840393066, 'post': {'rewrite_acc': 1.0, 'locality': {}, 'portability': {}}, 'pre': {'rewrite_acc': 0.0, 'portability': {}}}\n",
      "07/24/2023 15:19:11 - INFO - easyeditor.editors.editor -   1 editing: What role does Denny Herzig play in football? -> winger  \n",
      " {'case_id': 1, 'time': 3.9102234840393066, 'post': {'rewrite_acc': 1.0, 'locality': {}, 'portability': {}}, 'pre': {'rewrite_acc': 0.0, 'portability': {}}}\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loss 2.238 = 1.845 + 0.027 + 0.366 avg prob of [winger] 0.15818172693252563\n",
      "Delta norm: 21.838241577148438\n",
      "Change in target norm: 5.459441661834717 to 22.198219299316406 => 16.73877716064453\n",
      "Division Factor: 3.37018084526062\n",
      "Right vector norm: 6.4798431396484375\n",
      "Right vector shape: torch.Size([4096])\n",
      "Deltas successfully computed for ['model.layers.5.mlp.down_proj.weight']\n",
      "New weights successfully inserted into ['model.layers.5.mlp.down_proj.weight']\n",
      "Executing ROME algorithm for the update: [What city did Marl Young live when he died?] -> [New Orleans]\n",
      "Computing left vector (u)...\n",
      "Selected u projection object Marl Young\n",
      "Left vector shape: torch.Size([11008])\n",
      "Computing right vector (v)\n",
      "Lookup index found: 6 | Sentence: What city did Marl Young live when he died?<unk>New | Token: Young\n",
      "Rewrite layer is 5\n",
      "Tying optimization objective to 31\n",
      "Recording initial value of v*\n",
      "loss 10.7 = 10.7 + 0.0 + 0.0 avg prob of [New Orleans] 2.3286998839466833e-05\n",
      "loss 9.572 = 9.191 + 0.031 + 0.35 avg prob of [New Orleans] 0.00010439392644912004\n",
      "loss 8.431 = 8.033 + 0.048 + 0.35 avg prob of [New Orleans] 0.0003275324124842882\n",
      "loss 7.624 = 7.184 + 0.089 + 0.35 avg prob of [New Orleans] 0.0007692548097111285\n",
      "loss 7.117 = 6.655 + 0.112 + 0.35 avg prob of [New Orleans] 0.001301707117818296\n",
      "loss 7.36 = 6.895 + 0.114 + 0.35 avg prob of [New Orleans] 0.0010629170574247837\n",
      "loss 7.244 = 6.771 + 0.123 + 0.35 avg prob of [New Orleans] 0.001164681976661086\n",
      "loss 6.818 = 6.399 + 0.069 + 0.35 avg prob of [New Orleans] 0.0016937960172072053\n",
      "loss 6.545 = 6.13 + 0.065 + 0.35 avg prob of [New Orleans] 0.002205566270276904\n",
      "loss 6.31 = 5.878 + 0.082 + 0.35 avg prob of [New Orleans] 0.0028228936716914177\n",
      "loss 5.97 = 5.555 + 0.065 + 0.35 avg prob of [New Orleans] 0.003913948778063059\n",
      "loss 5.317 = 4.916 + 0.051 + 0.35 avg prob of [New Orleans] 0.007438432890921831\n",
      "loss 4.421 = 4.022 + 0.048 + 0.35 avg prob of [New Orleans] 0.018538013100624084\n",
      "loss 4.136 = 3.728 + 0.058 + 0.35 avg prob of [New Orleans] 0.026598453521728516\n",
      "loss 6.423 = 5.995 + 0.077 + 0.35 avg prob of [New Orleans] 0.003177461912855506\n",
      "loss 5.438 = 5.022 + 0.065 + 0.35 avg prob of [New Orleans] 0.007187752518802881\n",
      "loss 4.751 = 4.336 + 0.065 + 0.35 avg prob of [New Orleans] 0.013517588376998901\n",
      "loss 3.989 = 3.569 + 0.069 + 0.35 avg prob of [New Orleans] 0.028942465782165527\n",
      "loss 3.512 = 3.081 + 0.08 + 0.35 avg prob of [New Orleans] 0.04606056213378906\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2023-07-24 15:19:15,680 - easyeditor.editors.editor - INFO - Execution 2 editing took 3.9200351238250732\n",
      "07/24/2023 15:19:15 - INFO - easyeditor.editors.editor -   Execution 2 editing took 3.9200351238250732\n",
      "2023-07-24 15:19:15,743 - easyeditor.editors.editor - INFO - Evaluation took 0.061623573303222656\n",
      "07/24/2023 15:19:15 - INFO - easyeditor.editors.editor -   Evaluation took 0.061623573303222656\n",
      "2023-07-24 15:19:15,744 - easyeditor.editors.editor - INFO - 2 editing: What city did Marl Young live when he died? -> New Orleans  \n",
      " {'case_id': 2, 'time': 3.9200351238250732, 'post': {'rewrite_acc': 1.0, 'locality': {}, 'portability': {}}, 'pre': {'rewrite_acc': 0.0, 'portability': {}}}\n",
      "07/24/2023 15:19:15 - INFO - easyeditor.editors.editor -   2 editing: What city did Marl Young live when he died? -> New Orleans  \n",
      " {'case_id': 2, 'time': 3.9200351238250732, 'post': {'rewrite_acc': 1.0, 'locality': {}, 'portability': {}}, 'pre': {'rewrite_acc': 0.0, 'portability': {}}}\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loss 3.261 = 2.856 + 0.055 + 0.35 avg prob of [New Orleans] 0.05759819597005844\n",
      "Delta norm: 22.831401824951172\n",
      "Change in target norm: 5.707783222198486 to 23.50977897644043 => 17.8019962310791\n",
      "Division Factor: 3.4670419692993164\n",
      "Right vector norm: 6.585268497467041\n",
      "Right vector shape: torch.Size([4096])\n",
      "Deltas successfully computed for ['model.layers.5.mlp.down_proj.weight']\n",
      "New weights successfully inserted into ['model.layers.5.mlp.down_proj.weight']\n",
      "[{'case_id': 0, 'time': 4.404031991958618, 'post': {'rewrite_acc': 0.75, 'locality': {}, 'portability': {}}, 'pre': {'rewrite_acc': 0.0, 'portability': {}}}, {'case_id': 1, 'time': 3.9102234840393066, 'post': {'rewrite_acc': 1.0, 'locality': {}, 'portability': {}}, 'pre': {'rewrite_acc': 0.0, 'portability': {}}}, {'case_id': 2, 'time': 3.9200351238250732, 'post': {'rewrite_acc': 1.0, 'locality': {}, 'portability': {}}, 'pre': {'rewrite_acc': 0.0, 'portability': {}}}]\n",
      "<class 'transformers.models.llama.modeling_llama.LlamaForCausalLM'>\n"
     ]
    }
   ],
   "source": [
    "editor=BaseEditor.from_hparams(hparams)\n",
    "metrics, edited_model, _ = editor.edit(\n",
    "    prompts=prompts,\n",
    "    ground_truth=ground_truth,\n",
    "    target_new=target_new,\n",
    "    subject=subject,\n",
    "    keep_original_weight=False\n",
    ")\n",
    "print(metrics)\n",
    "print(type(edited_model))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "73ee2632",
   "metadata": {},
   "source": [
    "#### Reliability Test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "0ffcafed",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "normalizer.cc(51) LOG(INFO) precompiled_charsmap is empty. use identity normalization.\n"
     ]
    },
    {
     "data": {
      "application/json": {
       "ascii": false,
       "bar_format": null,
       "colour": null,
       "elapsed": 0.0040435791015625,
       "initial": 0,
       "n": 0,
       "ncols": null,
       "nrows": 8,
       "postfix": null,
       "prefix": "Loading checkpoint shards",
       "rate": null,
       "total": 33,
       "unit": "it",
       "unit_divisor": 1000,
       "unit_scale": false
      },
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "6f117c4df7b34202a8322703483046c1",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Loading checkpoint shards:   0%|          | 0/33 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/wmr/anaconda3/envs/EasyEdit/lib/python3.9/site-packages/transformers/tokenization_utils_base.py:2395: UserWarning: `max_length` is ignored when `padding`=`True` and there is no truncation strategy. To pad to max length, use `padding='max_length'`.\n",
      "  warnings.warn(\n",
      "/home/wmr/anaconda3/envs/EasyEdit/lib/python3.9/site-packages/transformers/generation/utils.py:1259: UserWarning: You have modified the pretrained model configuration to control generation. This is a deprecated strategy to control generation and will be removed soon, in a future version. Please use a generation configuration file (see https://huggingface.co/docs/transformers/main_classes/text_generation)\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Pre-Edit Outputs:  ['<unk>Who was the designer of Lahti Town Hall? Who was the designer of Lahti', '<unk>What role does Denny Herzig play in football?\\nThe Denny Herzig Foundation is', '<unk>What city did Marl Young live when he died?\\n10. What was the name']\n",
      "Post-Edit Outputs:  ['<unk>Who was the designer of Lahti Town Hall? Who was the designer of Lahti', '<unk>What role does Denny Herzig play in football? winger, forward, attacker,', '<unk>What city did Marl Young live when he died? New Orleans. — “New Orleans,']\n"
     ]
    }
   ],
   "source": [
    "from transformers import LlamaTokenizer\n",
    "from transformers import LlamaForCausalLM\n",
    "tokenizer = LlamaTokenizer.from_pretrained('./hugging_cache/llama-7b', cache_dir='./hugging_cache')\n",
    "tokenizer.pad_token_id = tokenizer.eos_token_id\n",
    "tokenizer.padding_side='left'\n",
    "\n",
    "correct_prompts = ['Who was the designer of Lahti Town Hall?',\n",
    "                'What role does Denny Herzig play in football?',\n",
    "                'What city did Marl Young live when he died?']\n",
    "\n",
    "\n",
    "\n",
    "model = LlamaForCausalLM.from_pretrained('./hugging_cache/llama-7b', cache_dir='./hugging_cache').to('cuda')\n",
    "batch = tokenizer(correct_prompts, return_tensors='pt', padding=True, max_length=30)\n",
    "\n",
    "pre_edit_outputs = model.generate(\n",
    "    input_ids=batch['input_ids'].to('cuda'),\n",
    "    attention_mask=batch['attention_mask'].to('cuda'),\n",
    "#     max_length=15\n",
    "    max_new_tokens=8\n",
    "    \n",
    ")\n",
    "\n",
    "\n",
    "post_edit_outputs = edited_model.generate(\n",
    "    input_ids=batch['input_ids'].to('cuda'),\n",
    "    attention_mask=batch['attention_mask'].to('cuda'),\n",
    "#     max_length=15\n",
    "    max_new_tokens=8\n",
    ")\n",
    "print('Pre-Edit Outputs: ', [tokenizer.decode(x) for x in pre_edit_outputs.detach().cpu().numpy().tolist()])\n",
    "print('Post-Edit Outputs: ', [tokenizer.decode(x) for x in post_edit_outputs.detach().cpu().numpy().tolist()])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "660dcef9",
   "metadata": {},
   "source": [
     "#### Generalization Test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "a49753a6",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Pre-Edit Outputs:  ['<unk><unk><unk>Who was the architect behind the design of Lahti Town Hall? Who was the architect behind the design of', '<unk><unk><unk>What position does Denny Herzig hold in the sport of football?\\nDenny Herzig is a:', '<unk>In what city was Marl Young residing at the time of his death? 10. In what city was']\n",
      "Post-Edit Outputs:  ['<unk><unk><unk>Who was the architect behind the design of Lahti Town Hall?\\nWho was the architect behind the design', '<unk><unk><unk>What position does Denny Herzig hold in the sport of football? The answer is a very easy one.', '<unk>In what city was Marl Young residing at the time of his death? New Orleans, Louisiana.\\nNew Orleans']\n"
     ]
    }
   ],
   "source": [
    "# from transformers import LlamaTokenizer\n",
    "# from transformers import LlamaForCausalLM\n",
    "# tokenizer = LlamaTokenizer.from_pretrained('./hugging_cache/llama-7b', cache_dir='./hugging_cache')\n",
    "# tokenizer.pad_token_id = tokenizer.eos_token_id\n",
    "# tokenizer.padding_side='left'\n",
    "\n",
    "\n",
    "generation_prompts = ['Who was the architect behind the design of Lahti Town Hall?',\n",
    "'What position does Denny Herzig hold in the sport of football?',\n",
    "'In what city was Marl Young residing at the time of his death?']\n",
    "\n",
    "# model = LlamaForCausalLM.from_pretrained('./hugging_cache/llama-7b', cache_dir='./hugging_cache').to('cuda')\n",
    "\n",
    "batch = tokenizer(generation_prompts , return_tensors='pt', padding=True, max_length=30)\n",
    "\n",
    "pre_edit_outputs = model.generate(\n",
    "    input_ids=batch['input_ids'].to('cuda'),\n",
    "    attention_mask=batch['attention_mask'].to('cuda'),\n",
    "#     max_length=15\n",
    "    max_new_tokens=8\n",
    ")\n",
    "post_edit_outputs = edited_model.generate(\n",
    "    input_ids=batch['input_ids'].to('cuda'),\n",
    "    attention_mask=batch['attention_mask'].to('cuda'),\n",
    "#     max_length=15\n",
    "    max_new_tokens=8\n",
    ")\n",
    "print('Pre-Edit Outputs: ', [tokenizer.decode(x) for x in pre_edit_outputs.detach().cpu().numpy().tolist()])\n",
    "print('Post-Edit Outputs: ', [tokenizer.decode(x) for x in post_edit_outputs.detach().cpu().numpy().tolist()])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f4efc70d",
   "metadata": {},
   "source": [
     "#### Locality Test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "9029f238",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Pre-Edit Outputs:  ['<unk><unk>Who was the designer of Eiffel Tower?\\n10. Who was the designer', '<unk><unk><unk>What role does Messi play in football?\\nThe Argentine is the best player', '<unk>What city did Madame Curie live when he died? 10. What city did Madame']\n",
      "Post-Edit Outputs:  ['<unk><unk>Who was the designer of Eiffel Tower?\\nWho was the first Indian to win', '<unk><unk><unk>What role does Messi play in football?\\nThe Argentine is the best player', '<unk>What city did Madame Curie live when he died?\\n10. What city did Madame']\n"
     ]
    }
   ],
   "source": [
    "# from transformers import LlamaTokenizer\n",
    "# from transformers import LlamaForCausalLM\n",
    "# tokenizer = LlamaTokenizer.from_pretrained('./hugging_cache/llama-7b', cache_dir='./hugging_cache')\n",
    "# tokenizer.pad_token_id = tokenizer.eos_token_id\n",
    "# tokenizer.padding_side='left'\n",
    "\n",
    "locality_prompts = ['Who was the designer of Eiffel Tower?',\n",
    "                'What role does Messi play in football?',\n",
    "                'What city did Madame Curie live when he died?']\n",
    "\n",
    "# model = LlamaForCausalLM.from_pretrained('./hugging_cache/llama-7b', cache_dir='./hugging_cache').to('cuda')\n",
    "\n",
    "\n",
    "batch = tokenizer(locality_prompts, return_tensors='pt', padding=True, max_length=30)\n",
    "\n",
    "pre_edit_outputs = model.generate(\n",
    "    input_ids=batch['input_ids'].to('cuda'),\n",
    "    attention_mask=batch['attention_mask'].to('cuda'),\n",
    "#     max_length=15\n",
    "    max_new_tokens=8\n",
    ")\n",
    "post_edit_outputs = edited_model.generate(\n",
    "    input_ids=batch['input_ids'].to('cuda'),\n",
    "    attention_mask=batch['attention_mask'].to('cuda'),\n",
    "#     max_length=15\n",
    "    max_new_tokens=8\n",
    ")\n",
    "print('Pre-Edit Outputs: ', [tokenizer.decode(x) for x in pre_edit_outputs.detach().cpu().numpy().tolist()])\n",
    "print('Post-Edit Outputs: ', [tokenizer.decode(x) for x in post_edit_outputs.detach().cpu().numpy().tolist()])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f2320c10",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
