{
 "cells": [
  {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
      "Copyright 2023 Google LLC.\n",
      "\n",
      "Licensed under the Apache License, Version 2.0 (the \"License\");"
    ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Setup"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "from ast import literal_eval\n",
    "import functools\n",
    "import json\n",
    "import os\n",
    "import random\n",
    "import wget\n",
    "\n",
    "\n",
     "# Scientific packages\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import torch\n",
    "from tqdm import tqdm\n",
    "torch.set_grad_enabled(False)\n",
    "tqdm.pandas()\n",
    "\n",
    "# Visuals\n",
    "from matplotlib import pyplot as plt\n",
    "import seaborn as sns\n",
    "sns.set(context=\"notebook\", \n",
    "        rc={\"font.size\":16,\n",
    "            \"axes.titlesize\":16,\n",
    "            \"axes.labelsize\":16,\n",
    "            \"xtick.labelsize\": 16.0,\n",
    "            \"ytick.labelsize\": 16.0,\n",
    "            \"legend.fontsize\": 16.0})\n",
    "palette_ = sns.color_palette(\"Set1\")\n",
    "palette = palette_[2:5] + palette_[7:]\n",
    "sns.set_theme(style='whitegrid')\n",
    "\n",
    "# Utilities\n",
    "from utils import (\n",
    "    ModelAndTokenizer,\n",
    "    make_inputs,\n",
    "    decode_tokens,\n",
    "    find_token_range,\n",
    "    predict_from_input,\n",
    ")\n",
    "\n",
    "# List of stopwords from NLTK, needed only for the attributes rate evaluation.\n",
    "import nltk\n",
    "nltk.download('stopwords')\n",
    "from nltk.corpus import stopwords\n",
    "stopwords0_ = stopwords.words('english')\n",
    "stopwords0_ = {word: \"\" for word in stopwords0_}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Get CounterFact data for GPT2-xl, from the ROME repository.\n",
    "wget.download(\"https://rome.baulab.info/data/dsets/known_1000.json\")\n",
    "knowns_df = pd.read_json(\"known_1000.json\")\n",
    "\n",
    "\n",
    "# Load GPT2-xl from Huggingface.\n",
    "model_name = \"gpt2-xl\"\n",
    "mt = ModelAndTokenizer(\n",
    "    model_name,\n",
    "    low_cpu_mem_usage=False,\n",
    "    torch_dtype=None,\n",
    ")\n",
    "mt.model.eval()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Methods"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _split_heads(tensor, num_heads, attn_head_size):\n",
    "    new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)\n",
    "    tensor = tensor.view(new_shape)\n",
    "    return tensor.permute(1, 0, 2)  # (head, seq_length, head_features)\n",
    "\n",
    "def _merge_heads(tensor, model):\n",
    "    num_heads = model.config.n_head\n",
    "    attn_head_size = model.config.n_embd // model.config.n_head\n",
    "    \n",
    "    tensor = tensor.permute(1, 0, 2).contiguous()\n",
    "    new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)\n",
    "    return tensor.view(new_shape)\n",
    "\n",
    "\n",
     "def set_act_get_hooks(model, tok_index, attn=False, attn_out=False, mlp=False, mlp_coef=False):\n",
     "    \"\"\"\n",
     "    Register forward hooks that stash activations at token position tok_index\n",
     "    into the dict model.activations_ (created on the model if missing).\n",
     "    Only works on GPT2.\n",
     "\n",
     "    Per layer i, the flags select what is captured:\n",
     "      attn     -> per-head value vectors under key c_attn_value_{i} and the\n",
     "                  attention distribution of tok_index under attn_weights_{i}\n",
     "      attn_out -> attention block output at tok_index under attn_out_{i}\n",
     "      mlp      -> MLP block output at tok_index under m_out_{i}\n",
     "      mlp_coef -> input of mlp.c_proj at tok_index under m_coef_{i}\n",
     "\n",
     "    Returns the list of hook handles; remove them with remove_hooks().\n",
     "    \"\"\"\n",
     "    # Make sure that these are not set to True at the same time \n",
     "    #  so we don't put two different hooks on the same module.  \n",
     "    assert not (attn is True and attn_out is True)\n",
     "    \n",
     "    for attr in [\"activations_\"]:\n",
     "        if not hasattr(model, attr):\n",
     "            setattr(model, attr, {})\n",
     "\n",
     "    def get_activation(name):\n",
     "        def hook(module, input, output):\n",
     "            if \"attn\" in name:\n",
     "                if \"c_attn\" in name:\n",
     "                    # output.shape: batch_size, seq_len, 3 * hidden_dim\n",
     "                    # c_attn packs (query, key, value); keep only the value part.\n",
     "                    _, _, attn_value = output[0].split(model.config.n_embd, dim=1)\n",
     "                    attn_value = _split_heads(attn_value,\n",
     "                                              model.config.n_head, \n",
     "                                              model.config.n_embd // model.config.n_head)\n",
     "                    model.activations_[name] = attn_value.detach()\n",
     "                elif \"attn_weights\" in name:\n",
     "                    # NOTE(review): expects the attn module to return 3 items\n",
     "                    # (output, present, attentions) — run the forward pass with\n",
     "                    # output_attentions=True or this assert fires.\n",
     "                    assert len(output) == 3\n",
     "                    attn_weights = output[2]  # (batch_size, num_heads, from_sequence_length, to_sequence_length)\n",
     "                    # the last dimension is a distribution obtained from softmax\n",
     "                    model.activations_[name] = attn_weights[0][:, tok_index, :].detach()\n",
     "                else:\n",
     "                    model.activations_[name] = output[0][:, tok_index].detach()\n",
     "            elif \"m_coef\" in name:\n",
     "                # num_tokens = list(input[0].size())[1]  # (batch, sequence, hidden_state)\n",
     "                model.activations_[name] = input[0][:, tok_index].detach()\n",
     "            elif \"m_out\" in name:\n",
     "                # MLP output is a plain tensor here: output[0] is batch item 0,\n",
     "                # then tok_index selects the token.\n",
     "                model.activations_[name] = output[0][tok_index].detach()\n",
     "        \n",
     "        return hook\n",
     "\n",
     "    hooks = []\n",
     "    for i in range(model.config.n_layer):\n",
     "        # attn and attn_out both hook transformer.h[i].attn — hence the assert above.\n",
     "        if attn is True:\n",
     "            hooks.append(model.transformer.h[i].attn.c_attn.register_forward_hook(get_activation(f\"c_attn_value_{i}\")))\n",
     "            hooks.append(model.transformer.h[i].attn.register_forward_hook(get_activation(f\"attn_weights_{i}\")))\n",
     "        if attn_out is True:\n",
     "            hooks.append(model.transformer.h[i].attn.register_forward_hook(get_activation(f\"attn_out_{i}\")))\n",
     "        if mlp_coef is True:\n",
     "            hooks.append(model.transformer.h[i].mlp.c_proj.register_forward_hook(get_activation(\"m_coef_\" + str(i))))\n",
     "        if mlp is True:\n",
     "            hooks.append(model.transformer.h[i].mlp.register_forward_hook(get_activation(\"m_out_\" + str(i))))\n",
     "            \n",
     "    return hooks\n",
    "\n",
    "\n",
    "# To block attention edges, we zero-out entries in the attention mask.\n",
    "# To do this, we add a wrapper around the attention module, because \n",
    "# the mask is passed as an additional argument, which could not be fetched \n",
    "# with standard hooks before pytorch 2.0.  \n",
    "def set_block_attn_hooks(model, from_to_index_per_layer, opposite=False):\n",
    "    \"\"\"\n",
    "    Only works on GPT2\n",
    "    \"\"\"\n",
    "    def wrap_attn_forward(forward_fn, model_, from_to_index_, opposite_):\n",
    "        @functools.wraps(forward_fn)\n",
    "        def wrapper_fn(*args, **kwargs):\n",
    "            new_args = []\n",
    "            new_kwargs = {}\n",
    "            for arg in args:\n",
    "                new_args.append(arg)\n",
    "            for (k, v) in kwargs.items():\n",
    "                new_kwargs[k] = v\n",
    "\n",
    "            hs = args[0]\n",
    "            num_tokens = list(hs[0].size())[0]\n",
    "            num_heads = model_.config.num_attention_heads\n",
    "            \n",
    "            if opposite_:\n",
    "                attn_mask = torch.tril(torch.zeros((num_tokens, num_tokens), dtype=torch.uint8))\n",
    "                for s, t in from_to_index_:\n",
    "                    attn_mask[s, t] = 1\n",
    "            else:\n",
    "                attn_mask = torch.tril(torch.ones((num_tokens, num_tokens), dtype=torch.uint8))\n",
    "                for s, t in from_to_index_:\n",
    "                    attn_mask[s, t] = 0\n",
    "            attn_mask = attn_mask.repeat(1, num_heads, 1, 1)\n",
    "            \n",
    "            attn_mask = attn_mask.to(dtype=model_.dtype)  # fp16 compatibility\n",
    "            attn_mask = (1.0 - attn_mask) * torch.finfo(model_.dtype).min\n",
    "            attn_mask = attn_mask.to(hs.device)\n",
    "\n",
    "            new_kwargs[\"attention_mask\"] = attn_mask\n",
    "            \n",
    "            return forward_fn(*new_args, **new_kwargs)\n",
    "\n",
    "        return wrapper_fn\n",
    "    \n",
    "    hooks = []\n",
    "    for i in from_to_index_per_layer.keys():\n",
    "        hook = model.transformer.h[i].attn.forward\n",
    "        model.transformer.h[i].attn.forward = wrap_attn_forward(model.transformer.h[i].attn.forward,\n",
    "                                                                model, from_to_index_per_layer[i], opposite)\n",
    "        hooks.append((i, hook))\n",
    "    \n",
    "    return hooks\n",
    "\n",
    "\n",
    "def set_get_attn_proj_hooks(model, tok_index):\n",
    "    \"\"\"\n",
    "    Only works on GPT2\n",
    "    \"\"\"\n",
    "    for attr in [\"projs_\"]:\n",
    "        if not hasattr(model, attr):\n",
    "            setattr(model, attr, {})\n",
    "\n",
    "    def get_projection(name, E):\n",
    "        def hook(module, input, output):\n",
    "            attn_out = output[0][:, tok_index]\n",
    "            probs, preds = torch.max(\n",
    "                torch.softmax(attn_out.matmul(E.T), dim=-1), \n",
    "                dim=-1\n",
    "            )\n",
    "            model.projs_[f\"{name}_probs\"] = probs.cpu().numpy()\n",
    "            model.projs_[f\"{name}_preds\"] = preds.cpu().numpy()\n",
    "            \n",
    "        return hook\n",
    "\n",
    "    E = model.get_input_embeddings().weight.detach()\n",
    "    hooks = []\n",
    "    for i in range(model.config.n_layer):\n",
    "        hooks.append(model.transformer.h[i].attn.register_forward_hook(get_projection(f\"attn_proj_{i}\", E)))\n",
    "            \n",
    "    return hooks\n",
    "\n",
    "\n",
    "def set_block_mlp_hooks(model, values_per_layer, coef_value=0):\n",
    "    \n",
    "    def change_values(values, coef_val):\n",
    "        def hook(module, input, output):\n",
    "            output[:, :, values] = coef_val\n",
    "\n",
    "        return hook\n",
    "\n",
    "    hooks = []\n",
    "    for layer in range(model.config.n_layer):\n",
    "        if layer in values_per_layer:\n",
    "            values = values_per_layer[layer]\n",
    "        else:\n",
    "            values = []\n",
    "        hooks.append(model.transformer.h[layer].mlp.c_fc.register_forward_hook(\n",
    "            change_values(values, coef_value)\n",
    "        ))\n",
    "\n",
    "    return hooks\n",
    "\n",
    "\n",
    "def set_proj_hooks(model):\n",
    "    for attr in [\"projs_\"]:\n",
    "        if not hasattr(model, attr):\n",
    "            setattr(model, attr, {})\n",
    "\n",
    "    def get_projection(name, E):\n",
    "        def hook(module, input, output):\n",
    "            num_tokens = list(input[0].size())[1]  #(batch, sequence, hidden_state)\n",
    "            if name == f\"layer_residual_{final_layer}\":\n",
    "                hs = output\n",
    "            else:\n",
    "                hs = input[0]\n",
    "            probs, preds = torch.max(\n",
    "                torch.softmax(hs.matmul(E.T), dim=-1), \n",
    "                dim=-1\n",
    "            )\n",
    "            model.projs_[f\"{name}_preds\"] = preds.cpu().numpy()\n",
    "            model.projs_[f\"{name}_probs\"] = probs.cpu().numpy()\n",
    "        return hook\n",
    "\n",
    "    E = model.get_input_embeddings().weight.detach()\n",
    "    final_layer = model.config.n_layer-1\n",
    "    \n",
    "    hooks = []\n",
    "    for i in range(model.config.n_layer-1):\n",
    "        hooks.append(model.transformer.h[i].register_forward_hook(\n",
    "            get_projection(f\"layer_residual_{i}\", E)\n",
    "        ))\n",
    "    hooks.append(model.transformer.ln_f.register_forward_hook(\n",
    "        get_projection(f\"layer_residual_{final_layer}\", E)\n",
    "    ))\n",
    "\n",
    "    return hooks\n",
    "\n",
    "\n",
    "def set_hs_patch_hooks(model, hs_patch_config, patch_input=False):\n",
    "    \n",
    "    def patch_hs(name, position_hs, patch_input):\n",
    "        \n",
    "        def pre_hook(module, input):\n",
    "            for position_, hs_ in position_hs:\n",
    "                # (batch, sequence, hidden_state)\n",
    "                input[0][0, position_] = hs_\n",
    "        \n",
    "        def post_hook(module, input, output):\n",
    "            for position_, hs_ in position_hs:\n",
    "                # (batch, sequence, hidden_state)\n",
    "                output[0][0, position_] = hs_\n",
    "        \n",
    "        if patch_input:\n",
    "            return pre_hook\n",
    "        else:\n",
    "            return post_hook\n",
    "\n",
    "    hooks = []\n",
    "    for i in hs_patch_config:\n",
    "        if patch_input:\n",
    "            hooks.append(model.transformer.h[i].register_forward_pre_hook(\n",
    "                patch_hs(f\"patch_hs_{i}\", hs_patch_config[i], patch_input)\n",
    "            ))\n",
    "        else:\n",
    "            hooks.append(model.transformer.h[i].register_forward_hook(\n",
    "                patch_hs(f\"patch_hs_{i}\", hs_patch_config[i], patch_input)\n",
    "            ))\n",
    "\n",
    "    return hooks\n",
    "    \n",
    "\n",
    "# Always remove your hooks, otherwise things will get messy.\n",
    "def remove_hooks(hooks):\n",
    "    for hook in hooks:\n",
    "        hook.remove()\n",
    "\n",
    "def remove_wrapper(model, hooks):\n",
    "    for i, hook in hooks:\n",
    "        model.transformer.h[i].attn.forward = hook"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def trace_with_attn_block(\n",
    "    model,\n",
    "    inp,\n",
    "    from_to_index_per_layer,   # A list of (source index, target index) to block\n",
    "    answers_t\n",
    "):\n",
    "    with torch.no_grad():\n",
    "        # set hooks\n",
    "        block_attn_hooks = set_block_attn_hooks(model, from_to_index_per_layer)\n",
    "        \n",
    "        # get prediction\n",
    "        outputs_exp = model(**inp)\n",
    "        \n",
    "        # remove hooks\n",
    "        remove_wrapper(model, block_attn_hooks)\n",
    "    \n",
    "    probs = torch.softmax(outputs_exp.logits[0, -1, :], dim=0)[answers_t]\n",
    "    \n",
    "    return probs\n",
    "\n",
    "\n",
    "def trace_with_proj(model, inp):\n",
    "    with torch.no_grad():\n",
    "        # set hooks\n",
    "        hooks = set_proj_hooks(model)\n",
    "        \n",
    "        # get prediction\n",
    "        answer_t, base_score = [d[0] for d in predict_from_input(model, inp)]\n",
    "        \n",
    "        # remove hooks\n",
    "        remove_hooks(hooks)\n",
    "        \n",
    "    projs = model.projs_\n",
    "    \n",
    "    return answer_t, base_score, projs\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def intervene_on_info_flow(\n",
    "    mt, prompt, source=None, kind=\"single\", window=10, positions=None\n",
    "):\n",
    "    inp = make_inputs(mt.tokenizer, [prompt])\n",
    "    answer_t, base_score, projs = trace_with_proj(mt.model, inp)\n",
    "    [answer] = decode_tokens(mt.tokenizer, [answer_t])\n",
    "    \n",
    "    ntoks = inp[\"input_ids\"].shape[1]\n",
    "    if source is None:\n",
    "        source_ = ntoks-1\n",
    "    else:\n",
    "        source_ = source\n",
    "        \n",
    "    if positions is None:\n",
    "        positions = list(range(ntoks))\n",
    "        \n",
    "    table = []\n",
    "    for tnum in positions:\n",
    "        row = []\n",
    "        for layer in range(mt.num_layers):\n",
    "            if kind == \"single\":\n",
    "                block_config = {layer: [(source_, tnum)]}\n",
    "                r = trace_with_attn_block(\n",
    "                    mt.model, inp, block_config, answer_t\n",
    "                )\n",
    "            elif kind == \"window\":\n",
    "                layerlist = [\n",
    "                    l for l in range(\n",
    "                        max(0, layer - window // 2), min(mt.num_layers, layer - (-window // 2))\n",
    "                    )\n",
    "                ]\n",
    "                block_config = {\n",
    "                    l: [(source_, tnum)]\n",
    "                    for l in layerlist\n",
    "                }\n",
    "                r = trace_with_attn_block(\n",
    "                    mt.model, inp, block_config, answer_t\n",
    "                )\n",
    "            else:\n",
    "                raise NotImplementedError\n",
    "            row.append(r)\n",
    "        table.append(torch.stack(row))\n",
    "    differences = torch.stack(table)\n",
    "    differences = differences.detach().cpu()\n",
    "    \n",
    "    low_score = differences.min()\n",
    "    \n",
    "    source_probs = [projs[f\"layer_residual_{l}_probs\"][0][source_] for l in range(mt.num_layers)]\n",
    "    source_preds = decode_tokens(mt.tokenizer, \n",
    "                                 [projs[f\"layer_residual_{l}_preds\"][0][source_] for l in range(mt.num_layers)])\n",
    "    \n",
    "    return dict(\n",
    "        scores=differences,\n",
    "        source_probs=source_probs,\n",
    "        source_preds=source_preds,\n",
    "        low_score=low_score,\n",
    "        high_score=base_score,\n",
    "        input_ids=inp[\"input_ids\"][0],\n",
    "        input_tokens=decode_tokens(mt.tokenizer, inp[\"input_ids\"][0]),\n",
    "        answer=answer,\n",
    "        source=source_,\n",
    "        window=window,\n",
    "        kind=\"\",\n",
    "    )\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "def plot_info_flow(\n",
     "    mt,\n",
     "    prompt,\n",
     "    source=None,\n",
     "    kind=\"single\",\n",
     "    window=10,\n",
     "    set_lims=True,\n",
     "    show_proj=True,\n",
     "    savepdf=None,\n",
     "):\n",
     "    \"\"\"Run intervene_on_info_flow and draw the score grid as a heatmap.\n",
     "\n",
     "    Rows are input tokens (the source position is marked with *), columns are\n",
     "    layers, and color is p(answer) after blocking attention to the source.\n",
     "    Saves to savepdf when given, otherwise shows the figure. Returns the\n",
     "    result dict from intervene_on_info_flow.\n",
     "    \"\"\"\n",
     "    result = intervene_on_info_flow(mt, prompt, source, kind, window)\n",
     "    \n",
     "    differences = result[\"scores\"]\n",
     "    low_score = result[\"low_score\"]\n",
     "    answer = result[\"answer\"]\n",
     "    window = result.get(\"window\", 10)\n",
     "    source = result['source']\n",
     "    labels = list(result[\"input_tokens\"])\n",
     "    labels[source] = labels[source] + \"*\"\n",
     "\n",
     "    size_height = len(labels) * 0.3\n",
     "    fig, ax = plt.subplots(figsize=(7, size_height), dpi=150)\n",
     "    if set_lims:\n",
     "        h = ax.pcolor(\n",
     "            differences,\n",
     "            cmap=\"Purples_r\",\n",
     "            vmin=0.0,\n",
     "            vmax=1.0\n",
     "        )\n",
     "    else:\n",
     "        h = ax.pcolor(\n",
     "            differences,\n",
     "            cmap=\"Purples_r\",\n",
     "        )\n",
     "    ax.invert_yaxis()\n",
     "    ax.set_yticks([0.5 + i for i in range(len(differences))])\n",
     "    # NOTE(review): x ticks stop at shape[1] - 6 — presumably to reduce\n",
     "    # clutter near the last layers; confirm this offset is intended.\n",
     "    ax.set_xticks([0.5 + i for i in range(0, differences.shape[1] - 6, 5)])\n",
     "    ax.set_xticklabels(list(range(0, differences.shape[1] - 6, 5)))\n",
     "    ax.set_yticklabels(labels)\n",
     "\n",
     "    if show_proj:\n",
     "        # Overlay each layer's top-1 vocabulary projection at the source position.\n",
     "        for x in range(mt.num_layers):\n",
     "            plt.text(\n",
     "                x + 0.5, source + 0.5, \n",
     "                f'{result[\"source_preds\"][x]} {round(100.0 * result[\"source_probs\"][x], 1)}',\n",
     "                horizontalalignment='center', verticalalignment='center', rotation=90, fontsize=4,\n",
     "            )\n",
     "\n",
     "    cb = plt.colorbar(h)\n",
     "    ax.set_title(\n",
     "        f\"Intervening on flow to: {result['input_tokens'][source]}\\nwindow: {window}, base probability: {round(result['high_score'].cpu().numpy().item(), 4)}\",\n",
     "        fontsize=10\n",
     "        )\n",
     "    if answer is not None:\n",
     "        cb.ax.set_title(f\"p({str(answer).strip()})\", y=-0.16, fontsize=10)\n",
     "    if savepdf:\n",
     "        os.makedirs(os.path.dirname(savepdf), exist_ok=True)\n",
     "        plt.savefig(savepdf, bbox_inches=\"tight\")\n",
     "        plt.close()\n",
     "    else:\n",
     "        plt.show()\n",
     "    \n",
     "    return result\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Information Flow Analysis"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Single-example interventions.\n",
    "\n",
    "prompt = \"Beats Music is owned by\"    \n",
    "inp = make_inputs(mt.tokenizer, [prompt])\n",
    "results = plot_info_flow(mt, prompt, source=None, kind=\"window\", window=10, \n",
    "                         set_lims=False, show_proj=False,\n",
    "                         # savepdf=f\"figs/{prompt}.pdf\"\n",
    "                        )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Information flow analysis\n",
     "\n",
     "# Number of consecutive layers knocked out around each layer.\n",
     "window = 9\n",
     "\n",
     "# Run attention knockouts\n",
     "results = []\n",
     "for row_i, row in tqdm(knowns_df.iterrows()):\n",
     "    prompt = row.prompt\n",
     "    subject = row.subject\n",
     "\n",
     "    inp = make_inputs(mt.tokenizer, [prompt])\n",
     "    # token positions occupied by the subject string\n",
     "    e_range = find_token_range(mt.tokenizer, inp[\"input_ids\"][0], subject)\n",
     "    e_range = [x for x in range(e_range[0], e_range[1])]\n",
     "\n",
     "    answer_t, base_score, projs = trace_with_proj(mt.model, inp)\n",
     "    base_score = base_score.cpu().item()\n",
     "    [answer] = decode_tokens(mt.tokenizer, [answer_t])\n",
     "\n",
     "    ntoks = inp[\"input_ids\"].shape[1]\n",
     "    source_ = ntoks-1\n",
     "\n",
     "    # Block attention from the last position to: the subject tokens, the\n",
     "    # non-subject (non-last) tokens, and the last position itself.\n",
     "    for block_ids, block_desc in [\n",
     "        ([x for x in e_range], \"subject\"),\n",
     "        ([x for x in range(ntoks-1) if x not in e_range], \"non-subject\"),\n",
     "        ([source_], \"last\"),\n",
     "    ]:\n",
     "        for layer in range(mt.num_layers):\n",
     "            # window of layers around `layer`, same arithmetic as intervene_on_info_flow\n",
     "            layerlist = [\n",
     "                l for l in range(\n",
     "                    max(0, layer - window // 2), min(mt.num_layers, layer - (-window // 2))\n",
     "                )\n",
     "            ]\n",
     "            block_config = {\n",
     "                l: [(source_, stok) for stok in block_ids]\n",
     "                for l in layerlist\n",
     "            }\n",
     "            r = trace_with_attn_block(\n",
     "                mt.model, inp, block_config, answer_t\n",
     "            )\n",
     "            new_score = r.cpu().item()\n",
     "            results.append({\n",
     "                \"prompt\": prompt,\n",
     "                \"block_desc\": block_desc,\n",
     "                \"layer\": layer,\n",
     "                \"base_score\": base_score,\n",
     "                \"new_score\": new_score,\n",
     "                \"relative diff\": (new_score - base_score) * 100.0 / base_score,\n",
     "                \"is_subject_position_zero\": e_range[0] == 0\n",
     "            })\n",
     "\n",
     "tmp = pd.DataFrame.from_records(results)\n",
     "# 1-indexed layer for plotting\n",
     "tmp[\"layer_1\"] = tmp.layer.apply(lambda x: x+1)\n",
     "\n",
     "\n",
     "# Plot the results\n",
     "plt.figure(figsize=(6,4))\n",
     "ax = sns.lineplot(tmp, x=\"layer_1\", y=\"relative diff\", \n",
     "                  hue=\"block_desc\",\n",
     "                  style=\"block_desc\",\n",
     "                  dashes=True,\n",
     "                  palette=palette[:3], linewidth=1)\n",
     "ax.set_xlabel(\"layer\")\n",
     "ax.set_ylabel(\"% change in prediction probability\")\n",
     "ax.set_xlim(0, mt.num_layers+0.5)\n",
     "sns.move_legend(ax, \"lower right\", title=\"blocked positions\")\n",
     "plt.axhline(y=0, color=palette[2], linestyle='-')\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Cache of hidden representations"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Create a cache of hidden representations (full per-layer hidden-state\n",
     "# sequences), keyed by (prompt, layer). (Previous comment said \"subject\n",
     "# representations\" — that is the next cell; this one caches whole sequences.)\n",
     "\n",
     "# mt.num_layers+1 entries; hidden_states presumably includes the embedding\n",
     "# output at index 0 — TODO confirm against the model's output format.\n",
     "layers_to_cache = list(range(mt.num_layers+1))\n",
     "hs_cache = {}\n",
     "for row_i, row in tqdm(knowns_df.iterrows()):\n",
     "    prompt = row.prompt\n",
     "\n",
     "    inp = make_inputs(mt.tokenizer, [prompt])\n",
     "    output = mt.model(**inp, output_hidden_states = True)\n",
     "\n",
     "    for layer in layers_to_cache:\n",
     "        if (prompt, layer) not in hs_cache:\n",
     "            hs_cache[(prompt, layer)] = []\n",
     "        hs_cache[(prompt, layer)].append(output[\"hidden_states\"][layer][0])\n",
     "        \n",
     "len(hs_cache)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# create a cache of subject representations\n",
     "# (the last subject token's hidden state, keyed by (subject, layer))\n",
     "\n",
     "layers_to_cache = list(range(mt.num_layers))\n",
     "subject_cache = {}\n",
     "for row_i, row in tqdm(knowns_df.iterrows()):\n",
     "    prompt = row.prompt\n",
     "    subject = row.subject\n",
     "    \n",
     "    inp = make_inputs(mt.tokenizer, [prompt])\n",
     "    # token positions occupied by the subject string\n",
     "    e_range = find_token_range(mt.tokenizer, inp[\"input_ids\"][0], subject)\n",
     "    e_range = [x for x in range(e_range[0], e_range[1])]\n",
     "    \n",
     "    output = mt.model(**inp, output_hidden_states = True)\n",
     "    \n",
     "    probs = torch.softmax(output[\"logits\"][:, -1], dim=1)\n",
     "    base_score, answer_t = torch.max(probs, dim=1)\n",
     "    base_score = base_score.cpu().item()\n",
     "    [answer] = decode_tokens(mt.tokenizer, answer_t)\n",
     "    \n",
     "    for layer in layers_to_cache:\n",
     "        if (subject, layer) not in subject_cache:\n",
     "            subject_cache[(subject, layer)] = []\n",
     "        # hidden_states[layer+1] is presumably the output of block `layer`\n",
     "        # (index 0 being the embeddings) — TODO confirm. Position e_range[-1]\n",
     "        # is the last subject token.\n",
     "        subject_cache[(subject, layer)].append(output[\"hidden_states\"][layer+1][0, e_range[-1]])\n",
     "\n",
     "len(subject_cache)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Attribute extraction"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "E = mt.model.get_input_embeddings().weight\n",
    "k = 10\n",
    "\n",
    "records = []\n",
    "for row_i, row in tqdm(knowns_df.iterrows()):\n",
    "    prompt = row.prompt\n",
    "    subject = row.subject\n",
    "    attribute = row.attribute\n",
    "    \n",
    "    inp = make_inputs(mt.tokenizer, [prompt])\n",
    "    input_tokens = decode_tokens(mt.tokenizer, inp[\"input_ids\"][0])\n",
    "    e_range = find_token_range(mt.tokenizer, inp[\"input_ids\"][0], subject)\n",
    "    e_range = [x for x in range(e_range[0], e_range[1])]\n",
    "    non_e_range_last = [x for x in range(len(input_tokens)-1) if x not in e_range]\n",
    "    source_index = len(input_tokens) - 1\n",
    "    \n",
    "    # set hooks to get ATTN and MLP outputs\n",
    "    hooks = set_act_get_hooks(mt.model, source_index, mlp=True, attn_out=True)\n",
    "    output = mt.model(**inp)\n",
    "    # remove hooks\n",
    "    remove_hooks(hooks)\n",
    "    \n",
    "    probs = torch.softmax(output[\"logits\"][:, -1], dim=1)\n",
    "    _, attribute_tok = torch.max(probs, dim=1)\n",
    "    attribute_tok = attribute_tok.cpu().item()\n",
    "    [attribute_tok_str] = decode_tokens(mt.tokenizer, [attribute_tok])\n",
    "    \n",
    "    for layer in range(mt.num_layers):\n",
    "        # ATTN\n",
    "        attn_out = mt.model.activations_[f'attn_out_{layer}'][0]\n",
    "        proj = attn_out.matmul(E.T).cpu().numpy()\n",
    "        ind = np.argsort(-proj, axis=-1)\n",
    "        attribute_tok_rank = np.where(ind == attribute_tok)[0][0]\n",
    "        attribute_tok_score = proj[ind[attribute_tok_rank]]\n",
    "        top_k_preds = [decode_tokens(mt.tokenizer, [i])[0] for i in ind[:k]]\n",
    "        records.append({\n",
    "                \"prompt\": prompt,\n",
    "                \"subject\": subject,\n",
    "                \"attribute\": attribute,\n",
    "                \"attribute_tok\": attribute_tok,\n",
    "                \"attribute_tok_str\": attribute_tok_str,\n",
    "                \"layer\": layer,\n",
    "                \"proj_vec\": \"MHSA\",\n",
    "                \"top_k_preds\": top_k_preds,\n",
    "                \"attribute_tok_rank\": attribute_tok_rank,\n",
    "                \"attribute_tok_score\": attribute_tok_score,\n",
    "                \"attribute_in_top_1\": attribute_tok_rank == 0,\n",
    "            })\n",
    "        \n",
    "        # MLP\n",
    "        mlp_out = mt.model.activations_[f'm_out_{layer}']\n",
    "        proj = mlp_out.matmul(E.T).cpu().numpy()\n",
    "        ind = np.argsort(-proj, axis=-1)\n",
    "        attribute_tok_rank = np.where(ind == attribute_tok)[0][0]\n",
    "        attribute_tok_score = proj[ind[attribute_tok_rank]]\n",
    "        top_k_preds = [decode_tokens(mt.tokenizer, [i])[0] for i in ind[:k]]\n",
    "        records.append({\n",
    "                \"prompt\": prompt,\n",
    "                \"subject\": subject,\n",
    "                \"attribute\": attribute,\n",
    "                \"attribute_tok\": attribute_tok,\n",
    "                \"attribute_tok_str\": attribute_tok_str,\n",
    "                \"layer\": layer,\n",
    "                \"proj_vec\": \"MLP\",\n",
    "                \"top_k_preds\": top_k_preds,\n",
    "                \"attribute_tok_rank\": attribute_tok_rank,\n",
    "                \"attribute_tok_score\": attribute_tok_score,\n",
    "                \"attribute_in_top_1\": attribute_tok_rank == 0,\n",
    "            })\n",
    "        \n",
    "        \n",
    "    # set hooks to get ATTN weights\n",
    "    get_act_hooks = set_act_get_hooks(mt.model, source_index, attn=True)\n",
    "    output = mt.model(**inp, output_attentions = True)\n",
    "    # remove hooks\n",
    "    remove_hooks(get_act_hooks)\n",
    "\n",
    "    for layer in range(mt.num_layers):\n",
    "        attn_c_proj = mt.model.transformer.h[layer].attn.c_proj\n",
    "        val = mt.model.activations_[f'c_attn_value_{layer}']\n",
    "        weight = mt.model.activations_[f'attn_weights_{layer}']\n",
    "        \n",
    "        weight = weight.unsqueeze(1)\n",
    "        weight_block_subj = weight.detach().clone()\n",
    "        for t in e_range:\n",
    "            weight_block_subj[:, :, t] = -1e6\n",
    "        weight_block_subj = torch.softmax(weight_block_subj, dim=-1)\n",
    "        \n",
    "        weight_block_subj_last = weight.detach().clone()\n",
    "        weight_block_subj_last[:, :, e_range[-1]] = -1e6\n",
    "        weight_block_subj_last = torch.softmax(weight_block_subj_last, dim=-1)\n",
    "        \n",
    "        weight_block_last = weight.detach().clone()\n",
    "        weight_block_last[:, :, source_index] = -1e6\n",
    "        weight_block_last = torch.softmax(weight_block_last, dim=-1)\n",
    "        \n",
    "        weight_block_subj_last_and_last = weight.detach().clone()\n",
    "        weight_block_subj_last_and_last[:, :, e_range[-1]] = -1e6\n",
    "        weight_block_subj_last_and_last[:, :, source_index] = -1e6\n",
    "        weight_block_subj_last_and_last = torch.softmax(weight_block_subj_last_and_last, dim=-1)\n",
    "        \n",
    "        weight_block_nonsubj = weight.detach().clone()\n",
    "        for t in non_e_range_last:\n",
    "            weight_block_nonsubj[:, :, t] = -1e6\n",
    "        weight_block_nonsubj[:, :, source_index] = -1e6\n",
    "        weight_block_nonsubj = torch.softmax(weight_block_nonsubj, dim=-1)\n",
    "        \n",
    "        weight_block_nonsubj_but_last = weight.detach().clone()\n",
    "        for t in non_e_range_last:\n",
    "            weight_block_nonsubj_but_last[:, :, t] = -1e6\n",
    "        weight_block_nonsubj_but_last = torch.softmax(weight_block_nonsubj_but_last, dim=-1)\n",
    "        \n",
    "        weight_block_all_but_first = torch.zeros_like(weight) -1e6\n",
    "        weight_block_all_but_first[:, :, 0] = weight[:, :, 0]\n",
    "        weight_block_all_but_first = torch.softmax(weight_block_all_but_first, dim=-1)\n",
    "        \n",
    "        weight_block_all_but_last = torch.zeros_like(weight) -1e6\n",
    "        weight_block_all_but_last[:, :, source_index] = weight[:, :, source_index]\n",
    "        weight_block_all_but_last = torch.softmax(weight_block_all_but_last, dim=-1)\n",
    "        \n",
    "        weight_block_all_but_subj_last = torch.zeros_like(weight) -1e6\n",
    "        weight_block_all_but_subj_last[:, :, e_range[-1]] = weight[:, :, e_range[-1]]\n",
    "        weight_block_all_but_subj_last = torch.softmax(weight_block_all_but_subj_last, dim=-1)\n",
    "        \n",
    "        weight_block_all_but_subj_last_last = torch.zeros_like(weight) -1e6\n",
    "        weight_block_all_but_subj_last_last[:, :, e_range[-1]] = weight[:, :, e_range[-1]]\n",
    "        weight_block_all_but_subj_last_last[:, :, source_index] = weight[:, :, source_index]\n",
    "        weight_block_all_but_subj_last_last = torch.softmax(weight_block_all_but_subj_last_last, dim=-1)\n",
    "        \n",
    "        weight_block_all = torch.zeros_like(weight)\n",
    "        \n",
    "        for (weight_mat, weight_desc) in [\n",
    "            (weight, \"MHSA*\"),\n",
    "            (weight_block_subj, \"MHSA block subject\"),\n",
    "            (weight_block_last, \"MHSA block last\"),\n",
    "            (weight_block_subj_last, \"MHSA block subject-last\"),\n",
    "            (weight_block_subj_last_and_last, \"MHSA block subject-last + last\"),\n",
    "            (weight_block_nonsubj, \"MHSA block non-subject\"),\n",
    "            (weight_block_nonsubj_but_last, \"MHSA block non-subject ex. last\"),\n",
    "            (weight_block_all_but_first, \"MHSA block all but first\"),\n",
    "            (weight_block_all_but_last, \"MHSA block all but last\"),\n",
    "            (weight_block_all_but_subj_last, \"MHSA block all but subject-last\"),\n",
    "            (weight_block_all_but_subj_last_last, \"MHSA block all but subject-last + last\"),\n",
    "            (weight_block_all, \"MHSA block all\")\n",
    "        ]:\n",
    "            attn_out = torch.matmul(weight_mat, val)\n",
    "            attn_out = _merge_heads(attn_out, mt.model)\n",
    "            attn_out = torch.addmm(attn_c_proj.bias, attn_out, attn_c_proj.weight).squeeze()\n",
    "            \n",
    "            proj = attn_out.matmul(E.T).cpu().numpy()\n",
    "            ind = np.argsort(-proj, axis=-1)\n",
    "            attribute_tok_rank = np.where(ind == attribute_tok)[0][0]\n",
    "            attribute_tok_score = proj[ind[attribute_tok_rank]]\n",
    "            top_k_preds = [decode_tokens(mt.tokenizer, [i])[0] for i in ind[:k]]\n",
    "            records.append({\n",
    "                \"prompt\": prompt,\n",
    "                \"subject\": subject,\n",
    "                \"attribute\": attribute,\n",
    "                \"attribute_tok\": attribute_tok,\n",
    "                \"attribute_tok_str\": attribute_tok_str,\n",
    "                \"layer\": layer,\n",
    "                \"proj_vec\": weight_desc,\n",
    "                \"top_k_preds\": top_k_preds,\n",
    "                \"attribute_tok_rank\": attribute_tok_rank,\n",
    "                \"attribute_tok_score\": attribute_tok_score,\n",
    "                \"attribute_in_top_1\": attribute_tok_rank == 0,\n",
    "            })\n",
    "\n",
    "\n",
    "tmp = pd.DataFrame.from_records(records)\n",
    "tmp[\"layer_1\"] = tmp.layer.apply(lambda x: x+1)\n",
    "\n",
    "# Plot the results\n",
    "plt.figure(figsize=(5,3))\n",
    "order = [\"MHSA\", \"MLP\"]\n",
    "ax = sns.lineplot(\n",
    "    x=\"layer_1\", y=\"attribute_in_top_1\",\n",
    "    hue=\"proj_vec\", style=\"proj_vec\",\n",
    "    hue_order=order, style_order = order,\n",
    "    data=tmp[tmp.proj_vec.isin(order)],\n",
    "    palette=palette[:2]\n",
    ")\n",
    "ax.legend_.set_title(\"\")\n",
    "ax.set_ylabel(\"attribute extraction rate\")\n",
    "ax.set_xlabel(\"layer\")\n",
    "sns.move_legend(ax, \"upper left\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Attribute extraction statistics: Per-example extraction rate (across layers).\n",
    "# For how many examples there is at least one layer where the attribute is in the attention's output.\n",
    "\n",
    "tmp_ = tmp[\n",
    "    [\"prompt\", \"proj_vec\", \"attribute_in_top_1\"]\n",
    "].groupby([\"prompt\", \"proj_vec\"]).agg(\"max\").reset_index()\n",
    "\n",
    "tmp_ [[\"proj_vec\", \"attribute_in_top_1\"]\n",
    "     ].groupby(\"proj_vec\").agg(\"mean\").reset_index().sort_values(by=\"attribute_in_top_1\", ascending=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Attribute extraction statistics: Number of extracting layers\n",
    "\n",
    "# Averaging over examples with >=1 extraction events. \n",
    "tmp_ = tmp[\n",
    "    [\"prompt\", \"layer\", \"proj_vec\", \"attribute_in_top_1\"]\n",
    "].groupby([\"prompt\", \"proj_vec\", \"attribute_in_top_1\"]).agg(\"count\").reset_index()\n",
    "display(tmp_[tmp_.attribute_in_top_1 == True].groupby(\"proj_vec\").agg(\"mean\").reset_index())\n",
    "\n",
    "# Averaging over all the examples.\n",
    "tmp_ = tmp[\n",
    "    [\"prompt\", \"proj_vec\", \"attribute_in_top_1\"]\n",
    "].groupby([\"prompt\", \"proj_vec\"]).agg(\"sum\").reset_index()\n",
    "display(tmp_.groupby(\"proj_vec\").agg(\"mean\").reset_index())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Correlation between ATTN vs. MLP extraction events\n",
    "\n",
    "tmp_ = tmp[tmp.proj_vec.isin([\"MHSA\", \"MLP\"])][\n",
    "    [\"prompt\", \"layer\", \"proj_vec\", \"attribute_in_top_1\"]\n",
    "].groupby([\"prompt\", \"layer\", \"proj_vec\"]).agg(\"max\").reset_index()\n",
    "\n",
    "tmp_ = tmp_.sort_values(by=[\"prompt\", \"layer\", \"proj_vec\"])\n",
    "tmp_ = tmp_.set_index(['prompt', 'layer', 'proj_vec'])['attribute_in_top_1'].unstack().reset_index()\n",
    "tmp_ = tmp_.sort_values(by=[\"prompt\", \"layer\"])\n",
    "\n",
    "tmp_[\"MHSA\"] = tmp_.apply(lambda row: int(row.MHSA) * (row.layer+1) - 1, axis=1)\n",
    "tmp_[\"MLP\"] = tmp_.apply(lambda row: int(row.MLP) * (row.layer+1) - 1, axis=1)\n",
    "tmp_ = tmp_[[\"prompt\", \"MHSA\", \"MLP\"]].groupby('prompt').agg(lambda x: [y for y in x.tolist() if y>-1]).reset_index()\n",
    "tmp_[\"MHSA_first\"] = tmp_.MHSA.apply(lambda x: min(x) if len(x) > 0 else -1)\n",
    "tmp_[\"MLP_first\"] = tmp_.MLP.apply(lambda x: min(x) if len(x) > 0 else -1)\n",
    "tmp_[\"MHSA_ex\"] = tmp_.MHSA.apply(lambda x: len(x) > 0)\n",
    "tmp_[\"MLP_ex\"] = tmp_.MLP.apply(lambda x: len(x) > 0)\n",
    "\n",
    "print(len(tmp_))\n",
    "print(tmp_[[\"MHSA_ex\", \"MLP_ex\"]].value_counts() * 100.0 / len(tmp_))\n",
    "\n",
    "tmp__ = tmp_[(tmp_.MHSA_ex == True) & (tmp_.MLP_ex == True)]\n",
    "assert tmp__.MHSA_first.min() > -1\n",
    "assert tmp__.MLP_first.min() > -1\n",
    "tmp__[\"MHSA_before_MLP\"] = tmp__.apply(lambda row: row.MHSA_first <= row.MLP_first, axis=1)\n",
    "print(len(tmp__))\n",
    "print(tmp__[\"MHSA_before_MLP\"].value_counts() * 100.0 / len(tmp__))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Attribute extraction rate with patching of early representations\n",
    "\n",
    "E = mt.model.get_input_embeddings().weight\n",
    "k = 10\n",
    "\n",
    "records = []\n",
    "for row_i, row in tqdm(knowns_df.iterrows()):\n",
    "    prompt = row.prompt\n",
    "    subject = row.subject\n",
    "    attribute = row.attribute\n",
    "    \n",
    "    inp = make_inputs(mt.tokenizer, [prompt])\n",
    "    input_tokens = decode_tokens(mt.tokenizer, inp[\"input_ids\"][0])\n",
    "    e_range = find_token_range(mt.tokenizer, inp[\"input_ids\"][0], subject)\n",
    "    e_range = [x for x in range(e_range[0], e_range[1])]\n",
    "    non_e_range = [x for x in range(len(input_tokens)-1) if x not in e_range]\n",
    "    source_index = len(input_tokens) - 1\n",
    "\n",
    "    # set hooks to get ATTN outputs\n",
    "    hooks = set_act_get_hooks(mt.model, source_index, attn_out=True)\n",
    "    \n",
    "    output = mt.model(**inp)\n",
    "    probs = torch.softmax(output[\"logits\"][:, -1], dim=1)\n",
    "    _, attribute_tok = torch.max(probs, dim=1)\n",
    "    attribute_tok = attribute_tok.cpu().item()\n",
    "    [attribute_tok_str] = decode_tokens(mt.tokenizer, [attribute_tok])\n",
    "    \n",
    "    activations = {key: val for key, val in mt.model.activations_.items()}\n",
    "    mt.model.activations_ = {}\n",
    "    \n",
    "\n",
    "    for layer in range(mt.num_layers):\n",
    "        # ATTN\n",
    "        attn_out = activations[f'attn_out_{layer}'][0]\n",
    "        proj = attn_out.matmul(E.T).cpu().numpy()\n",
    "        ind = np.argsort(-proj, axis=-1)\n",
    "        attribute_tok_rank = np.where(ind == attribute_tok)[0][0]\n",
    "        attribute_tok_score = proj[ind[attribute_tok_rank]]\n",
    "        top_k_preds = [decode_tokens(mt.tokenizer, [i])[0] for i in ind[:k]]\n",
    "        records.append({\n",
    "            \"prompt\": prompt,\n",
    "            \"subject\": subject,\n",
    "            \"attribute\": attribute,\n",
    "            \"attribute_tok\": attribute_tok,\n",
    "            \"attribute_tok_str\": attribute_tok_str,\n",
    "            \"layer\": layer,\n",
    "            \"proj_vec\": \"attn\",\n",
    "            \"top_k_preds\": top_k_preds,\n",
    "            \"attribute_tok_rank\": attribute_tok_rank,\n",
    "            \"attribute_tok_score\": attribute_tok_score,\n",
    "            \"attribute_in_top_1\": attribute_tok_rank == 0,\n",
    "            \"patch_desc\": \"-\",\n",
    "            \"patch_layer\": \"-\",\n",
    "        })\n",
    "\n",
    "        # ATTN over patched subject representations\n",
    "        for patch_positions, patch_desc in [(e_range, \"subject\"),\n",
    "                                            (non_e_range, \"non-subject\"),\n",
    "                                            ([source_index], \"last\")]:\n",
    "            for layer_ in [0, 1, 5, 10, 20]:\n",
    "                # set hooks to patch early hidden states\n",
    "                hs_patch_config = {\n",
    "                    layer: [\n",
    "                        (i, hs_cache[(prompt, layer_)][0][i])\n",
    "                        for i in patch_positions\n",
    "                    ]\n",
    "                }\n",
    "                patch_hooks = set_hs_patch_hooks(mt.model, hs_patch_config, patch_input=True)\n",
    "\n",
    "                # run model on the same prompt\n",
    "                _ = mt.model(**inp)\n",
    "\n",
    "                # remove patching hooks\n",
    "                remove_hooks(patch_hooks)\n",
    "\n",
    "                attn_out = mt.model.activations_[f'attn_out_{layer}'][0]\n",
    "                proj = attn_out.matmul(E.T).cpu().numpy()\n",
    "                ind = np.argsort(-proj, axis=-1)\n",
    "                attribute_tok_rank = np.where(ind == attribute_tok)[0][0]\n",
    "                attribute_tok_score = proj[ind[attribute_tok_rank]]\n",
    "                top_k_preds = [decode_tokens(mt.tokenizer, [i])[0] for i in ind[:k]]\n",
    "                records.append({\n",
    "                    \"prompt\": prompt,\n",
    "                    \"subject\": subject,\n",
    "                    \"attribute\": attribute,\n",
    "                    \"attribute_tok\": attribute_tok,\n",
    "                    \"attribute_tok_str\": attribute_tok_str,\n",
    "                    \"layer\": layer,\n",
    "                    \"proj_vec\": \"attn\",\n",
    "                    \"top_k_preds\": top_k_preds,\n",
    "                    \"attribute_tok_rank\": attribute_tok_rank,\n",
    "                    \"attribute_tok_score\": attribute_tok_score,\n",
    "                    \"attribute_in_top_1\": attribute_tok_rank == 0,\n",
    "                    \"patch_desc\": patch_desc,\n",
    "                    \"patch_layer\": str(layer_),\n",
    "                })\n",
    "                mt.model.activations_ = {}\n",
    "\n",
    "    # remove hooks\n",
    "    remove_hooks(hooks)\n",
    "\n",
    "tmp = pd.DataFrame.from_records(records)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "tmp_ = tmp[\n",
    "    [\"prompt\", \"patch_layer\", \"patch_desc\", \"attribute_in_top_1\"]\n",
    "].groupby([\"prompt\", \"patch_desc\", \"patch_layer\"]).agg(\"max\").reset_index()\n",
    "tmp__ = tmp_[[\"patch_layer\", \"patch_desc\", \"attribute_in_top_1\"]].groupby(\n",
    "    [\"patch_layer\", \"patch_desc\"]).agg(\"mean\").reset_index()\n",
    "\n",
    "tmp__[\"patch_layer_int\"] = tmp__.patch_layer.apply(lambda x: literal_eval(x) if x != \"-\" else -1)\n",
    "\n",
    "\n",
    "plt.figure(figsize=(4.5,2.5))\n",
    "order = [\"subject\", \"non-subject\", \"last\"]\n",
    "ax = sns.scatterplot(\n",
    "    x=\"patch_layer_int\", \n",
    "    y=f\"attribute_in_top_1\",\n",
    "    hue=\"patch_desc\", style=\"patch_desc\",\n",
    "    hue_order=order, style_order=order,\n",
    "    data=tmp__[tmp__.patch_layer != \"-\"],\n",
    "    palette=palette[:3],\n",
    "    s=100\n",
    ")\n",
    "ax.set_xlabel(\"layer used for patching\")\n",
    "ax.set_ylabel(f\"attribute extraction rate\")\n",
    "sns.move_legend(ax, \"lower right\", title=\"patched positions\", bbox_to_anchor=(1.01, -0.01), ncol=1)\n",
    "\n",
    "no_patch_mean = tmp__[tmp__.patch_layer == \"-\"][\"attribute_in_top_1\"].mean()\n",
    "plt.axhline(y=no_patch_mean, color=palette[4], linestyle='-', linewidth=1)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# Comparing the attribute rank in the subject representation and in the attention output\n",
    "\n",
    "E = mt.model.get_input_embeddings().weight\n",
    "\n",
    "records = []\n",
    "for row_i, row in tqdm(knowns_df.iterrows()):\n",
    "    prompt = row.prompt\n",
    "    subject = row.subject\n",
    "    attribute = row.attribute\n",
    "    \n",
    "    inp = make_inputs(mt.tokenizer, [prompt])\n",
    "    output = mt.model(**inp)\n",
    "    probs = torch.softmax(output[\"logits\"][:, -1], dim=1)\n",
    "    _, attribute_tok = torch.max(probs, dim=1)\n",
    "    attribute_tok = attribute_tok.cpu().item()\n",
    "    [attribute_tok_str] = decode_tokens(mt.tokenizer, [attribute_tok])\n",
    "    \n",
    "    input_tokens = decode_tokens(mt.tokenizer, inp[\"input_ids\"][0])\n",
    "    e_range = find_token_range(mt.tokenizer, inp[\"input_ids\"][0], subject)\n",
    "    e_range = [x for x in range(e_range[0], e_range[1])]\n",
    "    source_index = len(input_tokens) - 1\n",
    "    \n",
    "    # set hooks to get ATTN and MLP outputs\n",
    "    hooks = set_act_get_hooks(mt.model, source_index, mlp=True, attn_out=True)\n",
    "    output = mt.model(**inp)\n",
    "    # remove hooks\n",
    "    remove_hooks(hooks)\n",
    "    \n",
    "    for layer in range(mt.num_layers):\n",
    "        attn_out = mt.model.activations_[f'attn_out_{layer}'][0]\n",
    "        proj_attn = attn_out.matmul(E.T).cpu().numpy()\n",
    "        ind_attn = np.argsort(-proj_attn, axis=-1)\n",
    "        attribute_tok_rank_attn = np.where(ind_attn == attribute_tok)[0][0]\n",
    "        \n",
    "        subj_hs = subject_cache[(subject, layer)][0]\n",
    "        proj_hs = subj_hs.matmul(E.T).cpu().numpy()\n",
    "        ind_hs = np.argsort(-proj_hs, axis=-1)\n",
    "        attribute_tok_rank_hs = np.where(ind_hs == attribute_tok)[0][0]\n",
    "        attribute_tok_rank_diff = attribute_tok_rank_hs - attribute_tok_rank_attn\n",
    "        \n",
    "        records.append({\n",
    "            \"prompt\": prompt,\n",
    "            \"subject\": subject,\n",
    "            \"attribute\": attribute,\n",
    "            \"attribute_tok\": attribute_tok,\n",
    "            \"attribute_tok_str\": attribute_tok_str,\n",
    "            \"layer\": layer,\n",
    "            \"attribute_tok_rank_attn\": attribute_tok_rank_attn,\n",
    "            \"attribute_tok_rank_hs\": attribute_tok_rank_hs,\n",
    "            \"attribute_tok_rank_diff\": attribute_tok_rank_diff,\n",
    "            \"attribute_in_top_1_attn\": attribute_tok == ind_attn[0],\n",
    "            \"attribute_in_top_1_hs\": attribute_tok == ind_hs[0],\n",
    "            })\n",
    "        \n",
    "tmp = pd.DataFrame.from_records(records)\n",
    "print(tmp[tmp[f\"attribute_in_top_1_attn\"] == True][\"attribute_tok_rank_diff\"].describe())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Subject enrichment"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Get token representations' projections"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "E = mt.model.get_input_embeddings().weight.detach()\n",
    "k = 500"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Projection of token representations\n",
    "\n",
    "records = []\n",
    "for row_i, row in tqdm(knowns_df.iterrows()):\n",
    "    prompt = row.prompt\n",
    "    subject = row.subject\n",
    "    \n",
    "    inp = make_inputs(mt.tokenizer, [prompt])\n",
    "    e_range = find_token_range(mt.tokenizer, inp[\"input_ids\"][0], subject)\n",
    "    e_range = [x for x in range(e_range[0], e_range[1])]\n",
    "    \n",
    "    for layer in range(mt.num_layers):\n",
    "        positions = [(0, f\"first_token_{layer+1}\"),\n",
    "                     (e_range[-1], f\"subj_last_{layer+1}\"),\n",
    "                     (e_range[0], f\"subj_first_{layer+1}\"),\n",
    "                     (e_range[-1]+1, f\"no_subj_follow_{layer+1}\"),\n",
    "                     (len(inp[\"input_ids\"][0])-1, f\"no_subj_last_{layer+1}\")]\n",
    "        for (position, desc) in positions:\n",
    "            hs = hs_cache[(prompt, layer)][0][position]\n",
    "            projs = hs.matmul(E.T).cpu().numpy()\n",
    "            ind = np.argsort(-projs)\n",
    "\n",
    "            records.append({\n",
    "                \"example_index\": row_i,\n",
    "                \"subject\": subject,\n",
    "                \"layer\": layer,\n",
    "                \"position\": position,\n",
    "                \"desc\": desc,\n",
    "                \"desc_short\": desc.rsplit(\"_\", 1)[0],\n",
    "                \"top_k_preds\": [decode_tokens(mt.tokenizer, [i])[0] for i in ind[:k]],\n",
    "            })\n",
    "\n",
    "tmp = pd.DataFrame.from_records(records)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Projection of token embeddings\n",
    "\n",
    "records = []\n",
    "for row_i, row in tqdm(knowns_df.iterrows()):\n",
    "    subject = row.subject\n",
    "    prompt = row.prompt\n",
    "    prompt = \"<|endoftext|> \" + prompt  # fix first-position bias\n",
    "    \n",
    "    inp = make_inputs(mt.tokenizer, [prompt])\n",
    "    e_range = find_token_range(mt.tokenizer, inp[\"input_ids\"][0], subject)\n",
    "    e_range = [x for x in range(e_range[0], e_range[1])]\n",
    "    subject_tok = [inp[\"input_ids\"][0][i].item() for i in e_range]\n",
    "    subject_tok_str = [decode_tokens(mt.tokenizer, [t])[0] for t in subject_tok]\n",
    "    \n",
    "    vec = E[subject_tok, :].mean(axis=0)\n",
    "    proj = vec.matmul(E.T).cpu().numpy()\n",
    "    ind = np.argsort(-proj)\n",
    "    record = {\n",
    "        \"example_index\": row_i,\n",
    "        \"prompt\": row.prompt,\n",
    "        \"subject\": subject,\n",
    "        \"subject_tok\": subject_tok,\n",
    "        \"subject_tok_str\": str(subject_tok_str),\n",
    "        \"top_k_preds_str\": [decode_tokens(mt.tokenizer, [t])[0] for t in ind[:k]],\n",
    "    }\n",
    "    records.append(record)\n",
    "\n",
    "tmp = pd.DataFrame.from_records(records)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Projection of token representations while applying knockouts to MHSA/MLP sublayers\n",
    "\n",
    "all_mlp_dims = list(range(mt.model.config.n_embd * 4))\n",
    "subject_repr_layer = 40\n",
    "num_block_layers = 10\n",
    "\n",
    "records = []\n",
    "for row_i, row in tqdm(knowns_df.iterrows()):\n",
    "    prompt = row.prompt\n",
    "    subject = row.subject\n",
    "    inp = make_inputs(mt.tokenizer, [prompt])\n",
    "    e_range = find_token_range(mt.tokenizer, inp[\"input_ids\"][0], subject)\n",
    "    e_range = [x for x in range(e_range[0], e_range[1])]\n",
    "    position = e_range[-1]\n",
    "    \n",
    "    output_ = mt.model(**inp, output_hidden_states = True)\n",
    "    hs_ = output_[\"hidden_states\"][subject_repr_layer+1][0, position]\n",
    "    projs_ = hs_.matmul(E.T).cpu().numpy()\n",
    "    ind_ = np.argsort(-projs_)\n",
    "    top_k_preds_ = [decode_tokens(mt.tokenizer, [i])[0] for i in ind_[:k]]\n",
    "    \n",
    "    for start_block_layer in range(subject_repr_layer):\n",
    "        records.append({\n",
    "            \"example_index\": row_i,\n",
    "            \"subject\": subject,\n",
    "            \"layer\": subject_repr_layer,\n",
    "            \"position\": position,\n",
    "            \"block_layers\": [],\n",
    "            \"block_module\": \"None\",\n",
    "            \"start_block_layer\": start_block_layer,\n",
    "            \"end_block_layer\": -1,\n",
    "            \"num_block_layers\": 0,\n",
    "            \"num_block_layers_\": 0,\n",
    "            \"top_k_preds\": top_k_preds_\n",
    "        })\n",
    "        \n",
    "        end_block_layer = min(start_block_layer + num_block_layers + 1, subject_repr_layer)\n",
    "        block_layers = [l for l in range(start_block_layer, end_block_layer)]\n",
    "        for block_module in [\"mlp\", \"attn\"]:\n",
    "            with torch.no_grad():\n",
    "                if block_module == \"mlp\":\n",
    "                    block_config = {layer_: all_mlp_dims for layer_ in block_layers}\n",
    "                    block_mlp_hooks = set_block_mlp_hooks(mt.model, block_config)\n",
    "                    output = mt.model(**inp, output_hidden_states = True)\n",
    "                    remove_hooks(block_mlp_hooks)\n",
    "                elif block_module == \"attn\":\n",
    "                    block_config = {layer_: [] for layer_ in block_layers}\n",
    "                    block_attn_hooks = set_block_attn_hooks(mt.model, block_config, opposite=True)\n",
    "                    output = mt.model(**inp, output_hidden_states = True)\n",
    "                    remove_wrapper(mt.model, block_attn_hooks)\n",
    "\n",
    "            hs = output[\"hidden_states\"][subject_repr_layer+1][0, position]\n",
    "            projs = hs.matmul(E.T).cpu().numpy()\n",
    "            ind = np.argsort(-projs)\n",
    "\n",
    "            records.append({\n",
    "                \"example_index\": row_i,\n",
    "                \"subject\": subject,\n",
    "                \"layer\": subject_repr_layer,\n",
    "                \"position\": position,\n",
    "                \"block_layers\": block_layers,\n",
    "                \"block_module\": block_module,\n",
    "                \"start_block_layer\": start_block_layer,\n",
    "                \"end_block_layer\": end_block_layer-1,\n",
    "                \"num_block_layers\": num_block_layers,\n",
    "                \"num_block_layers_\": len(block_layers),\n",
    "                \"top_k_preds\": [decode_tokens(mt.tokenizer, [i])[0] for i in ind[:k]]\n",
    "            })\n",
    "\n",
    "tmp = pd.DataFrame.from_records(records)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Prepare attributes rate evaluation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Processing of Wikipedia paragraphs for automatic attribute rate evaluation.\n",
    "\n",
    "# This should be a path to a csv file with 2 columns and a header of column names \"subject\" and \"paragraphs\".\n",
    "# Each entry should have (a) a subject (string) from the \"knowns\" data (knowns_df) \n",
    "# and (b) paragraphs concatenated with space about the subject (a single string).\n",
    "paragraphs_data_path = None\n",
    "df_wiki = pd.read_csv(paragraphs_data_path)\n",
    "\n",
    "# Tokenize, remove duplicate tokens, stopwords, and subwords. \n",
    "df_wiki[\"context_tokenized_dedup\"] = df_wiki[\"paragraphs\"].progress_apply(\n",
    "    lambda x: list(set(decode_tokens(mt.tokenizer, mt.tokenizer([x])['input_ids'][0])))\n",
    ")\n",
    "df_wiki[\"context_tokenized_dedup_len\"] = df_wiki.context_tokenized_dedup.apply(lambda x: len(x))\n",
    "\n",
    "df_wiki[\"context_tokenized_dedup_no-stopwords\"] = df_wiki.context_tokenized_dedup.apply(\n",
    "    lambda x: [\n",
    "        y for y in x \n",
    "        if y.strip() not in stopwords0_ and len(y.strip())>2\n",
    "    ]\n",
    ")\n",
    "df_wiki[\"context_tokenized_dedup_no-stopwords_len\"] = df_wiki[\"context_tokenized_dedup_no-stopwords\"].apply(\n",
    "    lambda x: len(x))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_preds_wiki_overlap(subject, top_preds):\n",
    "    wiki_toks = df_wiki[df_wiki.subject == subject]\n",
    "    if len(wiki_toks) == 0:\n",
    "        return -1\n",
    "    wiki_toks = wiki_toks.iloc[0][\"context_tokenized_dedup_no-stopwords\"]\n",
    "    preds_wiki_inter = set(top_preds).intersection(set(wiki_toks))\n",
    "    \n",
    "    return len(preds_wiki_inter) * 100.0 / len(top_preds)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Evaluate attributes rate"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "tmp[\"top_k_preds_clean\"] = tmp.top_k_preds.progress_apply(lambda x: [\n",
    "    y for y in x \n",
    "    if y.strip().lower() not in stopwords0_ and len(y.strip())>2\n",
    "])\n",
    "tmp[\"num_clean_tokens\"] = tmp.top_k_preds_clean.progress_apply(lambda x: len(x))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "m = 50  # evaluate the 50 top-scoring tokens\n",
    "tmp[\"top_k_preds_in_context\"] = tmp.progress_apply(\n",
    "    lambda row: get_preds_wiki_overlap(row[\"subject\"], row[\"top_k_preds_clean\"][:m]), \n",
    "    axis=1\n",
    "    )\n",
    "print(len(tmp[tmp.top_k_preds_in_context == -1]) * 100.0 / len(tmp))\n",
    "print(tmp[tmp.top_k_preds_in_context > -1].subject.nunique())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Plot the attributes rate at different positions across layers \n",
    "\n",
    "tmp[\"desc_short_\"] = tmp.desc_short.apply(\n",
    "    lambda x: {\"subj_first\": \"subject first\", \n",
    "               \"subj_last\": \"subject last\", \n",
    "               \"no_subj_follow\": \"subject subseq.\", \n",
    "               \"no_subj_last\": \"input last\",\n",
    "               \"first_token\": \"input first\"}[x]\n",
    "    )\n",
    "tmp[\"layer_1\"] = tmp.layer.apply(lambda x: x+1)\n",
    "order = [\"subject last\", \"subject first\", \"subject subseq.\", \"input last\"]\n",
    "\n",
    "plt.figure(figsize=(5,3))\n",
    "ax = sns.lineplot(data=tmp[tmp.top_k_preds_in_context > -1], \n",
    "                 x=\"layer_1\", y=\"top_k_preds_in_context\", hue=\"desc_short_\",\n",
    "                style=\"desc_short_\",\n",
    "                dashes=True,\n",
    "                linewidth=2,\n",
    "                markers=False,\n",
    "                palette=palette[:4],\n",
    "                hue_order=order,\n",
    "                style_order=order\n",
    "                 )\n",
    "ax.set_xlabel(\"layer\")\n",
    "ax.set_ylabel(f\"attributes rate\")\n",
    "sns.move_legend(ax, \"upper left\", title=\"\", \n",
    "                labelspacing=0.3, handlelength=1.0, handletextpad=0.5)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Plot the change in the attributes rate in the subject representation at a specific layer,\n",
    "#  when knocking out different MLP/MHSA sublayers\n",
    "\n",
    "subject_repr_layer = 40\n",
    "\n",
    "tmp[\"desc_short\"] = tmp[['block_module', 'num_block_layers']].apply(tuple, axis=1)\n",
    "tmp[\"desc_short_\"] = tmp.desc_short.apply(\n",
    "    lambda x: {'mlp': \"MLP sublayers\", \n",
    "               'attn': \"MHSA sublayers\", \n",
    "               'None': \"No intervention\"}[x[0]]\n",
    "    )\n",
    "tmp[\"start_block_layer_1\"] = tmp.start_block_layer.apply(lambda x: x+1)\n",
    "\n",
    "plt.figure(figsize=(4,2))\n",
    "tmp_ = tmp[(tmp.top_k_preds_in_context > -1) & (tmp.num_block_layers.isin([0, 10]))]\n",
    "ax = sns.lineplot(data=tmp_, \n",
    "                  x=\"start_block_layer\", y=\"top_k_preds_in_context\", \n",
    "                  hue=\"desc_short_\",\n",
    "                  style=\"desc_short_\",\n",
    "                  palette=palette[:3],\n",
    "                  dashes=True,\n",
    "                  linewidth=2,\n",
    "                  markers=False\n",
    "                 )\n",
    "ax.legend_.set_title(\"\")\n",
    "ax.set_ylabel(f\"attributes rate\\nat layer {subject_repr_layer}\")\n",
    "ax.set_xlabel(\"intervention layers\")"
   ]
  }
 ],
 "metadata": {
  "accelerator": "GPU",
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.9"
  },
  "vscode": {
   "interpreter": {
    "hash": "2c3ec9f9cb0aa45979d92499665f4b05f2a3528d3b2ca0efacea2020d32b93f4"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
