{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "KgjnTzA1rAD-"
   },
   "source": [
    "# EasyEdit Example with **MEMIT**\n",
    "Tutorial author: Yu Zhang (echo_zy@std.uestc.edu.cn). In this tutorial, we use MEMIT to edit the gpt2-xl model. We hope this tutorial helps you understand the process of model editing and become familiar with the use of this tool.\n",
    "\n",
    "This tutorial uses Python 3."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "DcDGtquSrAEE"
   },
   "source": [
    "Method: MEMIT\n",
    "Paper: [MASS-EDITING MEMORY IN A TRANSFORMER](https://arxiv.org/abs/2210.07229)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "81WlMU9XszT0"
   },
   "source": [
    "Prepare the runtime environment"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "SYIDeI-qszT0"
   },
   "outputs": [],
   "source": [
    "# !git clone https://github.com/zjunlp/EasyEdit\n",
    "%cd EasyEdit\n",
    "!ls"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "klbtzTtktIEi"
   },
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "K2W5NrZ4r7nY"
   },
   "outputs": [],
   "source": [
    "# If, on the first run, the model cannot be loaded from the Hugging Face Hub:\n",
    "# (1) Using gpt2-xl as an example, first download the gpt2-xl model weights and\n",
    "#     config files (https://huggingface.co/gpt2/tree/main) into the directory below.\n",
    "# Expected directory layout (note: must match the model_name path in the hparams YAML):\n",
    "# EasyEdit\n",
    "# |———hugging_cache\n",
    "#         |———— gpt2-xl\n",
    "#             |———— config.json\n",
    "#             |———— pytorch_model.bin\n",
    "#             |———— vocab.json\n",
    "#             |———— merges.txt\n",
    "#             |———— tokenizer.json"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "H88mTIAMszT2"
   },
   "outputs": [],
   "source": [
    "!apt-get install python3.9\n",
    "!sudo update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1\n",
    "!sudo update-alternatives --config python3\n",
    "!apt-get install python3-pip\n",
    "%pip install -r requirements.txt"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "X8Pgz4p_szT3"
   },
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "f2kC4WkAmhfV"
   },
   "source": [
    "\n",
    "\n",
    "```python\n",
    "# For MEMIT hparams:\n",
    "\n",
    "alg_name: \"MEMIT\"\n",
    "model_name: \"./hugging_cache/gpt2-xl\"\n",
    "device: 0\n",
    "layers: [17]\n",
    "clamp_norm_factor: 0.75\n",
    "layer_selection: \"all\"\n",
    "fact_token: \"subject_last\"\n",
    "v_num_grad_steps: 20\n",
    "v_lr: 5e-1\n",
    "v_loss_layer: 47\n",
    "v_weight_decay: 0.5\n",
    "kl_factor: 0.0625\n",
    "mom2_adjustment: true\n",
    "mom2_update_weight: 20000\n",
    "rewrite_module_tmp: \"transformer.h.{}.mlp.c_proj\"\n",
    "layer_module_tmp: \"transformer.h.{}\"\n",
    "mlp_module_tmp: \"transformer.h.{}.mlp\"\n",
    "attn_module_tmp: \"transformer.h.{}.attn\"\n",
    "ln_f_module: \"transformer.ln_f\"\n",
    "lm_head_module: \"transformer.wte\"\n",
    "mom2_dataset: \"wikipedia\"\n",
    "mom2_n_samples: 100000\n",
    "mom2_dtype: \"float32\"\n",
    "```\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/mnt/8t/xkw/EasyEdit\n"
     ]
    }
   ],
   "source": [
    "%cd .."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "id": "8Sm-_TMZszT4"
   },
   "outputs": [],
   "source": [
    "from easyeditor import BaseEditor\n",
    "from easyeditor import MEMITHyperParams\n",
    "import os\n",
    "# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"2\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "id": "I7uCoXVWszT4",
    "outputId": "6f6fea0e-7c87-4dea-8d43-f0b4553ceba3"
   },
   "outputs": [],
   "source": [
    "# New fact triples (prompt, ground truth, edit target, subject) to edit\n",
    "hparams=MEMITHyperParams.from_hparams('./hparams/MEMIT/gpt2-xl.yaml')\n",
    "prompts = ['Ray Charles, the',\n",
    "            'Grant Hill is a professional',\n",
    "            'The law in Ikaalinen declares the language'\n",
    "            ]\n",
    "ground_truth = ['piano','basketball','Finnish']\n",
    "target_new = ['violin','soccer','Swedish' ]\n",
    "subject = ['Ray Charles','Grant Hill','Ikaalinen']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "referenced_widgets": [
      "b0ce937fb7054ff8a305124e3495c2cc",
      "71d2dc2747e541cb8aa3492c768f861c"
     ]
    },
    "id": "zjJEXeo-szT6",
    "outputId": "c871319a-bd24-4162-b218-24199b44710d"
   },
   "outputs": [],
   "source": [
    "editor=BaseEditor.from_hparams(hparams)\n",
    "metrics, edited_model, _ = editor.edit(\n",
    "    prompts=prompts,\n",
    "    ground_truth=ground_truth,\n",
    "    target_new=target_new,\n",
    "    subject=subject,\n",
    "    sequential_edit=True\n",
    ")\n",
    "print(metrics)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "vvOTQuoDszT6"
   },
   "source": [
    "**Reliability Test**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import GPT2Tokenizer\n",
    "from transformers import GPT2LMHeadModel\n",
    "tokenizer = GPT2Tokenizer.from_pretrained('./hugging_cache/gpt2-xl')\n",
    "tokenizer.pad_token_id = tokenizer.eos_token_id\n",
    "tokenizer.padding_side='left'\n",
    "device = 1\n",
    "model = GPT2LMHeadModel.from_pretrained('./hugging_cache/gpt2-xl').to(f'cuda:{device}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "referenced_widgets": [
      "37a0f556ff6c41839dd8140471736552"
     ]
    },
    "id": "gck1wcJuszT7",
    "outputId": "0afde353-5b16-450f-f177-2d67c7fdb35d"
   },
   "outputs": [
    {
     "data": {
      "application/json": {
       "ascii": false,
       "bar_format": null,
       "colour": null,
       "elapsed": 0.013374805450439453,
       "initial": 0,
       "n": 0,
       "ncols": null,
       "nrows": null,
       "postfix": null,
       "prefix": "Downloading (…)neration_config.json",
       "rate": null,
       "total": 124,
       "unit": "B",
       "unit_divisor": 1000,
       "unit_scale": true
      },
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "37a0f556ff6c41839dd8140471736552",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Downloading (…)neration_config.json:   0%|          | 0.00/124 [00:00<?, ?B/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/data/home/lyc/ENTER/envs/zy/lib/python3.9/site-packages/transformers/tokenization_utils_base.py:2395: UserWarning: `max_length` is ignored when `padding`=`True` and there is no truncation strategy. To pad to max length, use `padding='max_length'`.\n",
      "  warnings.warn(\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Pre-Edit Outputs:  ['<|endoftext|><|endoftext|><|endoftext|><|endoftext|><|endoftext|><|endoftext|><|endoftext|>Ray Charles, the legendary singer, song', 'The law in Ikaalinen declares the language of the Finnish language to', '<|endoftext|><|endoftext|><|endoftext|><|endoftext|><|endoftext|><|endoftext|>Grant Hill is a professional basketball player for the']\n",
      "Post-Edit Outputs:  ['<|endoftext|><|endoftext|><|endoftext|><|endoftext|><|endoftext|><|endoftext|><|endoftext|>Ray Charles, the violinist, was', 'The law in Ikaalinen declares the language of the Finnish language to', '<|endoftext|><|endoftext|><|endoftext|><|endoftext|><|endoftext|><|endoftext|>Grant Hill is a professional soccer player for the']\n"
     ]
    }
   ],
   "source": [
    "correct_prompts = [\n",
    "    \"Ray Charles, the\",\n",
    "    \"The law in Ikaalinen declares the language of\",\n",
    "    \"Grant Hill is a professional\"\n",
    "]\n",
    "\n",
    "batch = tokenizer(correct_prompts, return_tensors='pt', padding=True)\n",
    "\n",
    "pre_edit_outputs = model.generate(\n",
    "    input_ids=batch['input_ids'].to(model.device),\n",
    "    attention_mask=batch['attention_mask'].to(model.device),\n",
    "    max_new_tokens=15\n",
    ")\n",
    "\n",
    "post_edit_outputs = edited_model.generate(\n",
    "    input_ids=batch['input_ids'].to(edited_model.device),\n",
    "    attention_mask=batch['attention_mask'].to(edited_model.device),\n",
    "    max_new_tokens=15\n",
    ")\n",
    "\n",
    "max_length = batch['input_ids'].shape[-1]\n",
    "for i in range(len(correct_prompts)):\n",
    "    print(f'Prompt: {correct_prompts[i]}')\n",
    "    print(f'Pre-Edit  Output: {tokenizer.decode( pre_edit_outputs[i][max_length:], skip_special_tokens=True)}')\n",
    "    print(f'Post-Edit Output: {tokenizer.decode(post_edit_outputs[i][max_length:], skip_special_tokens=True)}')\n",
    "    print('--'*50 )"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "XEncqsovszT7"
   },
   "source": [
    "**Generalization test**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "qdu8WPwVszT7",
    "outputId": "bed7fd8b-e254-4533-808e-9ad9309e1cd2"
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Pre-Edit Outputs:  ['<|endoftext|><|endoftext|>Grant Hill is a \\xa0former NBA player who played for the', \"Grant Hill is good at playing the game. He's a great passer,\", '<|endoftext|>Ray Charles likes to play the game of \"Who\\'s the best?\"', \"Ray Charles is good at playing the game. He's a great athlete,\"]\n",
      "Post-Edit Outputs:  ['<|endoftext|><|endoftext|>Grant Hill is a \\xa0American soccer player who played for the', \"Grant Hill is good at playing the game. He's a good player.\", \"<|endoftext|>Ray Charles likes to play the violin. He's a great violinist\", \"Ray Charles is good at playing the violin. He's also good at playing\"]\n"
     ]
    }
   ],
   "source": [
    "generation_prompts = [\n",
    "\"Grant Hill is a \",\n",
    "\"Grant Hill is good at playing the\",\n",
    "\"Ray Charles likes to play the\",\n",
    "\"Ray Charles is good at playing the\",\n",
    "]\n",
    "\n",
    "batch = tokenizer(generation_prompts, return_tensors='pt', padding=True, max_length=30)\n",
    "\n",
    "pre_edit_outputs = model.generate(\n",
    "    input_ids=batch['input_ids'].to(model.device),\n",
    "    attention_mask=batch['attention_mask'].to(model.device),\n",
    "    max_new_tokens=15\n",
    ")\n",
    "\n",
    "post_edit_outputs = edited_model.generate(\n",
    "    input_ids=batch['input_ids'].to(edited_model.device),\n",
    "    attention_mask=batch['attention_mask'].to(edited_model.device),\n",
    "    max_new_tokens=15\n",
    ")\n",
    "\n",
    "max_length = batch['input_ids'].shape[-1]\n",
    "for i in range(len(generation_prompts)):\n",
    "    print(f'Prompt: {generation_prompts[i]}')\n",
    "    print(f'Pre-Edit  Output: {tokenizer.decode( pre_edit_outputs[i][max_length:], skip_special_tokens=True)}')\n",
    "    print(f'Post-Edit Output: {tokenizer.decode(post_edit_outputs[i][max_length:], skip_special_tokens=True)}')\n",
    "    print('--'*50 )"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "rBK0iO_zszT8"
   },
   "source": [
    "**Locality test**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "hNaPqukkszT8",
    "outputId": "73aa6553-3181-416d-cf62-c41c2d438638"
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Pre-Edit Outputs:  ['<|endoftext|><|endoftext|><|endoftext|><|endoftext|><|endoftext|>Kobe Bryant is a professional \\xa0basketball player', '<|endoftext|><|endoftext|><|endoftext|><|endoftext|><|endoftext|>Michael Morgan plays in the position of a defensive back', '<|endoftext|><|endoftext|><|endoftext|><|endoftext|><|endoftext|><|endoftext|><|endoftext|>Michael Jordan is a professional basketball player for', 'Is Grant Hill a professional soccer player? yes or no?\\n\\nYes']\n",
      "Post-Edit Outputs:  ['<|endoftext|><|endoftext|><|endoftext|><|endoftext|><|endoftext|>Kobe Bryant is a professional \\xa0basketball player', '<|endoftext|><|endoftext|><|endoftext|><|endoftext|><|endoftext|>Michael Morgan plays in the position of a defensive back', '<|endoftext|><|endoftext|><|endoftext|><|endoftext|><|endoftext|><|endoftext|><|endoftext|>Michael Jordan is a professional basketball player for', 'Is Grant Hill a professional soccer player? yes or no?\\n\\nYes']\n"
     ]
    }
   ],
   "source": [
    "locality_prompts = [\n",
    "\"Kobe Bryant is a professional \",\n",
    "\"Michael Morgan plays in the position of\",\n",
    "\"Michael Jordan is a professional\",\n",
    "\"Is Grant Hill a professional soccer player? yes or no?\",\n",
    "]\n",
    "batch = tokenizer(locality_prompts, return_tensors='pt', padding=True, max_length=30)\n",
    "\n",
    "\n",
    "pre_edit_outputs = model.generate(\n",
    "    input_ids=batch['input_ids'].to(model.device),\n",
    "    attention_mask=batch['attention_mask'].to(model.device),\n",
    "    max_new_tokens=15\n",
    ")\n",
    "\n",
    "post_edit_outputs = edited_model.generate(\n",
    "    input_ids=batch['input_ids'].to(edited_model.device),\n",
    "    attention_mask=batch['attention_mask'].to(edited_model.device),\n",
    "    max_new_tokens=15\n",
    ")\n",
    "\n",
    "max_length = batch['input_ids'].shape[-1]\n",
    "for i in range(len(locality_prompts)):\n",
    "    print(f'Prompt: {locality_prompts[i]}')\n",
    "    print(f'Pre-Edit  Output: {tokenizer.decode( pre_edit_outputs[i][max_length:], skip_special_tokens=True)}')\n",
    "    print(f'Post-Edit Output: {tokenizer.decode(post_edit_outputs[i][max_length:], skip_special_tokens=True)}')\n",
    "    print('--'*50 )"
   ]
  }
 ],
 "metadata": {
  "colab": {
   "provenance": []
  },
  "kernelspec": {
   "display_name": "EasyEdit",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.20"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 0
}
