# main knowledge neurons class
import torch
import torch.nn.functional as F
import torch.nn as nn
import einops
from tqdm import tqdm
import numpy as np
import collections
from typing import List, Optional, Tuple, Callable
import torch
import torch.nn.functional as F
import einops
import collections
import math
from functools import partial
from transformers import PreTrainedTokenizerBase
from transformers import (
    BertTokenizer,
    BertLMHeadModel,
    GPT2Tokenizer,
    GPT2LMHeadModel,
    GPTNeoForCausalLM,
    AutoTokenizer,
    GPTJForCausalLM,
    LlamaTokenizer,
    LlamaForCausalLM,
    LlamaTokenizerFast,
    AutoModelForCausalLM
)

from .patch import *

def initialize_model_and_tokenizer(model_name: str, torch_dtype=torch.bfloat16):
    """Load a supported (model, tokenizer) pair by case-insensitive substring
    match on `model_name`.

    `model_name`: Hugging Face model identifier, e.g. "meta-llama/Llama-2-7b-hf"
    `torch_dtype`: dtype for the model weights where the branch forwards it.
        NOTE(review): the bert, gpt2 and qwen-v1 branches do not forward
        `torch_dtype` and load in their default precision despite the warning
        below — confirm this is intentional.

    Returns `(model, tokenizer)` with the model in eval mode.
    Raises ValueError for unrecognised model names.
    """
    print(f"WARNING: Loading {model_name} in {torch_dtype} precision.")
    name = model_name.lower()
    # FIX: dropped `device_map`/`torch_dtype` kwargs from the tokenizer
    # loads — they are model-loading options and were silently ignored by
    # the tokenizer classes.
    if "bert" in name:
        tokenizer = BertTokenizer.from_pretrained(model_name)
        model = BertLMHeadModel.from_pretrained(model_name, device_map='auto')
    elif "gpt2" in name:
        tokenizer = GPT2Tokenizer.from_pretrained(model_name)
        model = GPT2LMHeadModel.from_pretrained(model_name, device_map='auto')
    elif "gpt-neo" in name:
        tokenizer = GPT2Tokenizer.from_pretrained(model_name)
        model = GPTNeoForCausalLM.from_pretrained(model_name, device_map='auto', torch_dtype=torch_dtype)
    elif "gpt-j-6b" in name:
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = GPTJForCausalLM.from_pretrained(model_name, torch_dtype=torch_dtype, device_map='auto')
    elif "llama" in name:
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = LlamaForCausalLM.from_pretrained(model_name, torch_dtype=torch_dtype, device_map='auto')
    elif "qwen1.5" in name or "qwen2" in name:
        # Qwen 1.5 / 2: check these before the generic "qwen" branch below.
        model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True, torch_dtype=torch_dtype, device_map='auto')
        tokenizer = AutoTokenizer.from_pretrained(model_name, eos_token='<|endoftext|>', pad_token='<|endoftext|>', unk_token='<|endoftext|>', trust_remote_code=True)
    elif "qwen" in name:
        # Qwen v1: `fp32` is a flag of Qwen's remote modeling code, not a
        # standard transformers argument.
        model = AutoModelForCausalLM.from_pretrained(model_name, fp32=False, trust_remote_code=True, device_map='auto')
        tokenizer = AutoTokenizer.from_pretrained(model_name, eos_token='<|endoftext|>', pad_token='<|endoftext|>', unk_token='<|endoftext|>', trust_remote_code=True)
    elif "mamba" in name:
        model = AutoModelForCausalLM.from_pretrained(model_name, device_map='auto', torch_dtype=torch_dtype)
        tokenizer = AutoTokenizer.from_pretrained(model_name)
    else:
        raise ValueError(f"Model {model_name} not supported")

    model.eval()  # inference mode: disables dropout etc.

    return model, tokenizer

def model_type(model_name: str):
    """Map a model name to its canonical family identifier.

    Matching is case-insensitive and substring-based; the table below is
    checked in order, so more specific patterns (e.g. "llama-2") win over
    later generic ones. Raises ValueError for unrecognised names.
    """
    name = model_name.lower()
    family_patterns = (
        (("bert",), "bert"),
        (("gpt2",), "gpt2"),
        (("gpt-neo",), "gpt-neo"),
        (("gpt-j-6b",), "gpt-j-6b"),
        (("llama-2", "llama2"), "llama2"),
        (("llama-3", "llama3"), "llama3"),
        (("qwen",), "qwen"),
        (("mamba",), "mamba"),
    )
    for substrings, family in family_patterns:
        if any(s in name for s in substrings):
            return family
    raise ValueError(f"Model {model_name} not supported")

def gpu_mem_report(func):
    """Decorator that releases cached CUDA allocator memory after each call
    of `func`, so long attribution loops don't accumulate cached blocks.

    `torch.cuda.empty_cache()` is a no-op when CUDA has not been initialised,
    so the decorator is safe on CPU-only runs.
    """
    from functools import wraps  # local import: the file-level functools import only brings in `partial`

    @wraps(func)  # FIX: preserve the wrapped function's name/doc/signature metadata
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        finally:
            # FIX: empty the cache even when func raises — callers in this
            # file catch torch.cuda.OutOfMemoryError and keep going, so the
            # cache must be released on the failure path too.
            torch.cuda.empty_cache()

    return wrapper


class KnowledgeNeurons:
    """Locate, attribute and edit "knowledge neurons" in a transformer's
    feed-forward (FF) layers.

    Implements integrated gradients over scaled FF activations (after Dai et
    al., "Knowledge Neurons in Pretrained Transformers"), plus utilities to
    suppress/enhance activations and edit/erase FF output weights. Supports
    the model families produced by `model_type()` (bert, gpt2, gpt-neo,
    gpt-j-6b, llama2/3, qwen, mamba).
    """

    def __init__(
        self,
        model: nn.Module,
        tokenizer: PreTrainedTokenizerBase,
        model_type: str = "bert",
        device: str = "cuda",
    ):
        """
        `model`: the already-loaded transformer model
        `tokenizer`: its tokenizer
        `model_type`: family string as returned by the module-level `model_type()`
        `device`: device tokenized inputs / scaled activations are moved to
        """
        self.model = model
        self.model_type = model_type
        self.device = device
        self.tokenizer = tokenizer
        # Scratch slot written by the forward hook in
        # get_baseline_with_activations(); cleared after each use.
        self.baseline_activations = None

        # Dotted attribute paths (resolved via get_attributes/get_ff_layer from
        # .patch) locating each architecture's transformer layer stack, FF
        # input/output projections and word-embedding table.
        if self.model_type == "bert":
            self.transformer_layers_attr = "bert.encoder.layer"
            self.input_ff_attr = "intermediate"
            self.output_ff_attr = "output.dense.weight"
            self.word_embeddings_attr = "bert.embeddings.word_embeddings.weight"
            self.unk_token = getattr(self.tokenizer, "unk_token_id", None)
        elif "gpt-j-6b" == model_type:
            self.transformer_layers_attr = "transformer.h"
            self.input_ff_attr = "mlp.fc_in"
            self.output_ff_attr = "mlp.fc_out.weight"
            # NOTE: wte (token embeddings) — shaped differently from gpt2's
            # wpe used below.
            self.word_embeddings_attr = "transformer.wte"
        elif "gpt2" == model_type or 'gpt-neo' == model_type:
            self.transformer_layers_attr = "transformer.h"
            self.input_ff_attr = "mlp.c_fc"
            self.output_ff_attr = "mlp.c_proj.weight"
            # NOTE(review): wpe is the *position*-embedding table; the word
            # embeddings would be wte — confirm this is intentional.
            self.word_embeddings_attr = "transformer.wpe"
        elif 'llama' in model_type:  # llama2 / llama3
            self.transformer_layers_attr = "model.layers"
            self.input_ff_attr = "mlp.up_proj"
            self.output_ff_attr = "mlp.down_proj.weight"
            self.word_embeddings_attr = "model.embed_tokens"
        elif "qwen" == model_type:
            self.transformer_layers_attr = "transformer.h"
            self.input_ff_attr = "mlp.w2"
            self.output_ff_attr = "mlp.c_proj.weight"
            self.word_embeddings_attr = "transformer.wte"
        elif "mamba" == model_type:
            self.transformer_layers_attr = "backbone.layers"
        else:
            raise NotImplementedError

    def _get_output_ff_layer(self, layer_idx):
        """Return the raw *weight tensor* of layer `layer_idx`'s FF output
        projection (e.g. "mlp.down_proj.weight" for llama)."""
        return get_ff_layer(
            self.model,
            layer_idx,
            transformer_layers_attr=self.transformer_layers_attr,
            ff_attrs=self.output_ff_attr,
        )

    def _get_input_ff_layer(self, layer_idx):
        """Return the FF input projection *module* object (e.g. the
        "mlp.up_proj" Linear for llama) of layer `layer_idx`."""
        return get_ff_layer(
            self.model,
            layer_idx,
            transformer_layers_attr=self.transformer_layers_attr,
            ff_attrs=self.input_ff_attr,
        )

    def _get_word_embeddings(self):
        """Resolve the word-embedding attribute configured in __init__."""
        return get_attributes(self.model, self.word_embeddings_attr)

    def _get_transformer_layers(self):
        """Resolve the transformer layer stack configured in __init__."""
        return get_attributes(self.model, self.transformer_layers_attr)

    def _prepare_inputs(self, prompt, target=None, encoded_input=None):
        """Tokenize `prompt` (unless `encoded_input` is supplied) and locate
        the position whose logits will be scored.

        Returns `(encoded_input, mask_idx, target)`, where `target` is a
        single token id (bert) or a token-id list (autoregressive models), or
        None when no target was given.
        """
        if encoded_input is None:
            encoded_input = self.tokenizer(prompt, return_tensors="pt").to(self.device)
        if self.model_type == "bert":
            # assumes exactly one [MASK] in the prompt; .item() raises otherwise
            mask_idx = torch.where(encoded_input["input_ids"][0] == self.tokenizer.mask_token_id)[0].item()
        else:  # gpt-like autoregressive models (llama etc.)
            # with autoregressive models we always want to target the last token
            mask_idx = -1
        if target is not None:
            if self.model_type == "bert":
                target = self.tokenizer.convert_tokens_to_ids(target)
            else:
                target = self.tokenizer.encode(target)

        return encoded_input, mask_idx, target

    def _generate(self, prompt, ground_truth):
        """Greedily decode from `prompt` while tracking the probability of
        `ground_truth`.

        Returns `(gt_prob, argmax_prob, argmax_completion_str, argmax_tokens)`
        where the probabilities are products over the sampled steps.
        """
        encoded_input, mask_idx, target_label = self._prepare_inputs(
            prompt, ground_truth
        )
        # for autoregressive models, we might want to generate > 1 token
        # FIX: this previously tested `== "gpt"`, a value model_type() never
        # produces, so every autoregressive model fell into the single-step
        # branch and multi-token targets crashed on `.item()` below.
        if self.model_type != "bert":
            n_sampling_steps = len(target_label)
            # NOTE(review): llama tokenizers put the BOS id at target_label[0],
            # so its probability is folded into gt_prob — confirm acceptable.
        else:
            n_sampling_steps = 1  # TODO: we might want to use multiple mask tokens even with bert models

        all_gt_probs = []
        all_argmax_probs = []
        argmax_tokens = []
        argmax_completion_str = ""

        for i in range(n_sampling_steps):
            if i > 0:
                # retokenize the prompt extended with the previous argmax token
                encoded_input, mask_idx, target_label = self._prepare_inputs(
                    prompt, ground_truth
                )
            outputs = self.model(**encoded_input)
            probs = F.softmax(outputs.logits[:, mask_idx, :], dim=-1)
            if n_sampling_steps > 1:
                target_idx = target_label[i]
            else:
                target_idx = target_label
            gt_prob = probs[:, target_idx].item()
            all_gt_probs.append(gt_prob)

            # get info about argmax completion
            # (renamed the comprehension variable — it used to shadow the loop index `i`)
            argmax_prob, argmax_id = [t.item() for t in probs.max(dim=-1)]
            argmax_tokens.append(argmax_id)
            argmax_str = self.tokenizer.decode([argmax_id])
            all_argmax_probs.append(argmax_prob)

            prompt += argmax_str
            argmax_completion_str += argmax_str

        gt_prob = math.prod(all_gt_probs) if len(all_gt_probs) > 1 else all_gt_probs[0]
        argmax_prob = (
            math.prod(all_argmax_probs)
            if len(all_argmax_probs) > 1
            else all_argmax_probs[0]
        )
        return gt_prob, argmax_prob, argmax_completion_str, argmax_tokens

    def n_layers(self):
        """Number of transformer layers in the wrapped model."""
        return len(self._get_transformer_layers())

    @staticmethod
    def scaled_input(activations: torch.Tensor, steps: int = 20, device: str = "cpu"):
        """
        Tiles activations along the batch dimension - gradually scaling them over
        `steps` steps from 0 to their original value over the batch dimensions.

        `activations`: torch.Tensor
        original activations, shape (1, d)
        `steps`: int
        number of steps to take
        `device`: str
        device the (steps, d) result is moved to
        """
        tiled_activations = einops.repeat(activations, "b d -> (r b) d", r=steps)
        # FIX: build the 0 -> 1 ramp on the activations' device; it was
        # previously always created on CPU, which broke for GPU inputs.
        out = tiled_activations * torch.linspace(start=0, end=1, steps=steps, device=tiled_activations.device)[:, None]

        return out.to(device)

    @gpu_mem_report
    def get_baseline_with_activations(
        self,
        encoded_input: dict,
        layer_idx: int,
        mask_idx: int = -1,  # gpt-like models: default to the last token position
        transformer_layers_attr: str = "model.layers",
        ff_attrs: str = "mlp.up_proj",
    ):
        """
        Gets the baseline outputs and activations for the unmodified model at a given index.

        `encoded_input`: torch.Tensor
            the inputs to the model from self.tokenizer.encode_plus()
        `layer_idx`: int
            which transformer layer to access
        `mask_idx`: int
            the position at which to get the activations (TODO: rename? with autoregressive models there's no mask, so)
        `transformer_layers_attr`: str
            dotted prefix of the layer stack, e.g. "model.layers" (llama) or "transformer.h" (gpt)
        `ff_attrs`: str
            dotted suffix selecting the component inside a layer, e.g. the FF
            w1 is "mlp.up_proj" for llama and "mlp.c_fc" for gpt

        Returns `(logits, activations)`, both moved to CPU.
        """

        def get_activations(model, layer_idx, mask_idx):
            """
            This hook should assign the intermediate activations at a given layer / mask idx
            to the 'self.baseline_activations' variable
            """

            def hook_fn(acts):
                self.baseline_activations = acts[:, mask_idx, :]

            return register_hook(
                model,
                layer_idx=layer_idx,
                f=hook_fn,
                transformer_layers_attr=transformer_layers_attr,
                ff_attrs=ff_attrs,
            )

        handle = get_activations(self.model, layer_idx=layer_idx, mask_idx=mask_idx)
        baseline_outputs = self.model(**encoded_input)
        handle.remove()  # detach the hook before any other forward pass runs
        baseline_activations = self.baseline_activations
        self.baseline_activations = None

        return baseline_outputs.logits.cpu(), baseline_activations.cpu()

    @gpu_mem_report
    def integrated_grads_by_step(
        self,
        encoded_input,
        layer_idx,
        mask_idx,
        target_idx,
        batch_weights,
        batch_size,
        transformer_layers_attr,
        ff_attrs,
    ):
        """Run one batch of the integrated-gradients sum.

        Patches layer `layer_idx` so its FF activations at `mask_idx` are
        replaced by `batch_weights`, forwards the input tiled `batch_size`
        times, and returns the gradient of p(target_idx) w.r.t.
        `batch_weights`, summed over the batch and moved to CPU.

        Re-raises torch.cuda.OutOfMemoryError (after unpatching) so the
        caller can stop sampling early.
        """
        # we want to replace the intermediate activations at some layer, at the mask position, with `batch_weights`
        # first tile the inputs to the correct batch size
        inputs = {
            "input_ids": einops.repeat(encoded_input["input_ids"], "b d -> (r b) d", r=batch_size),
            "attention_mask": einops.repeat(encoded_input["attention_mask"], "b d -> (r b) d", r=batch_size),
        }
        if self.model_type == "bert":
            inputs["token_type_ids"] = einops.repeat(encoded_input["token_type_ids"], "b d -> (r b) d", r=batch_size)

        # then patch the model to replace the activations with the scaled activations
        patch_ff_layer(
            self.model,
            layer_idx=layer_idx,
            mask_idx=mask_idx,
            replacement_activations=batch_weights,  # e.g. torch.Size([20, 16384])
            transformer_layers_attr=transformer_layers_attr,
            ff_attrs=ff_attrs,
        )

        try:
            outputs = self.model(**inputs)
            probs = F.softmax(outputs.logits[:, mask_idx, :], dim=-1)
            grad = torch.autograd.grad(torch.unbind(probs[:, target_idx]), batch_weights)[0]
            grad_w = grad.sum(dim=0)
            grad_w_cpu = grad_w.cpu()
        except torch.cuda.OutOfMemoryError as e:
            print(f'Error:integrated_grads_by_step {e}')
            # patch_ff_layer / unpatch_ff_layer must always run as a pair, so
            # unpatch here before re-raising (and skip the unpatch below to
            # avoid unpatching twice).
            print(f'Error:integrated_grads_by_step length tokens:{inputs["input_ids"].shape}')
            unpatch_ff_layer(
                self.model,
                layer_idx=layer_idx,
                transformer_layers_attr=transformer_layers_attr,
                ff_attrs=ff_attrs,
            )
            raise  # let the caller decide how to recover

        unpatch_ff_layer(
            self.model,
            layer_idx=layer_idx,
            transformer_layers_attr=transformer_layers_attr,
            ff_attrs=ff_attrs,
        )

        return grad_w_cpu

    @gpu_mem_report
    def get_scores_for_layer(
        self,
        prompt: str,
        answer: str,
        layer_idx: int,
        batch_size: int = 10,
        steps: int = 20,
        encoded_input: Optional[dict] = None,  # FIX: was annotated Optional[int]
        transformer_layers_attr: str = "model.layers",
        ff_attrs: str = "mlp.up_proj",
        prob_type: str = "target_new",
        next_token: str = "answer_next_token",
        max_sample_steps: int = 100,
        max_tokens: int = 128,
        scal_gas: bool = False,
    ):
        """
        get the attribution scores for a given layer
        `prompt`: str
            the prompt to get the attribution scores for
        `answer`: str
            the ground truth / expected output
        `layer_idx`: int
            the layer to get the scores for
        `batch_size`: int
            batch size
        `steps`: int
            total number of steps (per token) for the integrated gradient calculations
        `encoded_input`: Optional[dict]
            if not None, then use this encoded input instead of getting a new one
        `transformer_layers_attr`: str
            dotted prefix of the layer stack, e.g. "model.layers" (llama) or "transformer.h" (gpt)
        `ff_attrs`: str
            dotted suffix of the FF component, e.g. "mlp.up_proj" (llama) or "mlp.c_fc" (gpt)
        `prob_type`: str
            the type of probability to use for calculating the attribution scores. Choose from 'ground_truth','target_new' or 'llm_answer'.
            'target_new' is the editing target, i.e. the correct answer
        `next_token`: str
            how the prompt is extended between sampling steps (gpt-like models
            only): 'argmax_next_token' appends the most likely next token,
            'answer_next_token' appends the next token of the correct answer
        `max_sample_steps`: int
            hard cap on the number of per-token sampling steps
        `max_tokens`: int
            hard cap on prompt length; longer prompts are left-truncated
        `scal_gas`: bool
            if True, scale step i's attribution by 1/sqrt(i+1) (helps with
            long unstructured answers; off by default)
        """
        # assert steps % batch_size == 0
        n_batches = steps // batch_size
        # First we take the unmodified model and use a hook to return the baseline intermediate activations at our chosen target layer
        encoded_input, mask_idx, target_label = self._prepare_inputs(prompt, answer, encoded_input)
        # for autoregressive models, we might want to generate > 1 token
        if self.model_type == "bert":
            n_sampling_steps = 1  # TODO: we might want to use multiple mask tokens even with bert models
        # llama: BOS/EOS only matter when the answer is a full sentence
        elif self.model_type == "llama2":
            # the llama2 tokenizer emits BOS id + answer token ids
            # ("bos_token_id": 1, "eos_token_id": 2)
            n_sampling_steps = len(target_label)
            assert len(target_label) > 1 and target_label[0] == 1, f"target_label:{target_label}"
            target_label = target_label + [2]  # append the EOS token id
            # the first id of a tokenized answer is always BOS (1); the real
            # answer ids start at index 1
        elif self.model_type == 'llama3':
            n_sampling_steps = len(target_label)
            assert len(target_label) > 1 and target_label[0] == 128000, f'target_label:{target_label}'
            target_label += [128009]  # eos
        else:  # gpt-like autoregressive models (gpt2, gpt-neo, gpt-j-6b, qwen, ...)
            n_sampling_steps = len(target_label)
        if n_sampling_steps > max_sample_steps:
            print(f'get_scores_for_layer n_sampling_steps:{n_sampling_steps} > {max_sample_steps}, limit to {max_sample_steps}')
            n_sampling_steps = max_sample_steps  # cap the number of sampling steps

        @gpu_mem_report
        def sample_integrated_grads(i, prompt):
            """One per-token sampling step: compute this step's integrated
            gradients and return (grads, prompt extended with the next token)."""
            print(f'layer:{layer_idx} sample steps:{i}')
            print(prompt)
            encoded_input, _, _ = self._prepare_inputs(prompt)
            tokens_length = encoded_input["input_ids"].shape[1]
            if tokens_length > max_tokens:
                print(f'get_scores_for_layer tokens_length:{tokens_length} > {max_tokens}, limit to {max_tokens}')
                # left-truncate so the most recent max_tokens tokens are kept
                idx = tokens_length - max_tokens
                encoded_input["input_ids"] = encoded_input["input_ids"][:, idx:]
                encoded_input["attention_mask"] = encoded_input["attention_mask"][:, idx:]
                # FIX: only bert-style tokenizers produce token_type_ids;
                # indexing unconditionally raised KeyError for gpt/llama.
                if "token_type_ids" in encoded_input:
                    encoded_input["token_type_ids"] = encoded_input["token_type_ids"][:, idx:]

            baseline_outputs_logits, baseline_activations = self.get_baseline_with_activations(
                encoded_input,
                layer_idx,
                mask_idx,
                transformer_layers_attr,
                ff_attrs
            )

            if n_sampling_steps > 1:  # llama tokenization length >= 2
                argmax_next_token = baseline_outputs_logits[:, mask_idx, :].argmax(dim=-1).item()
                if next_token == 'argmax_next_token':
                    next_token_str = self.tokenizer.decode(argmax_next_token)
                elif next_token == 'answer_next_token':
                    if 'llama' in self.model_type:
                        # skip index 0 (BOS): the real answer ids start at 1
                        next_token_str = self.tokenizer.decode(target_label[i+1])
                    else:
                        next_token_str = self.tokenizer.decode(target_label[i])
                else:
                    raise ValueError(f'param next_token:{next_token} not supported')
                if prob_type == 'llm_answer':
                    target_idx = argmax_next_token
                elif prob_type in ['target_new', 'ground_truth']:
                    target_idx = target_label[i]
                else:
                    raise ValueError(f'param prob_type:{prob_type} not supported')
                # when i == n_sampling_steps-1 the target_idx/prompt computed
                # below are never consumed (the loop exits) — harmless
                if 'llama' in self.model_type and i+1 < n_sampling_steps:  # n_sampling_steps == original len(target_label)
                    # score the answer ids between BOS and EOS; the appended
                    # EOS id itself is not reached here
                    target_idx = argmax_next_token if prob_type == 'llm_answer' else target_label[i+1]
                else:  # gpt, gpt-neo, gpt-j-6b, qwen
                    target_idx = argmax_next_token if prob_type == 'llm_answer' else target_label[i]
                if i == 0:
                    # NOTE(review): leading space so llama-style tokenizers
                    # re-tokenize the appended token correctly — confirm
                    prompt += (' ' + next_token_str)
                else:
                    prompt += next_token_str
            else:
                target_idx = target_label

            # Now we want to gradually change the intermediate activations of our layer from 0 -> their original value
            # and calculate the integrated gradient of the masked position at each step
            # we do this by repeating the input across the batch dimension, multiplying the first batch by 0, the second by 0.1, etc., until we reach 1
            scaled_weights = self.scaled_input(baseline_activations, steps, self.device)  # e.g. torch.Size([20, 16384])
            scaled_weights.requires_grad_(True)

            integrated_grads_this_step = []  # to store the integrated gradients
            for batch_weights in scaled_weights.chunk(n_batches):
                grad_w = self.integrated_grads_by_step(
                        encoded_input,
                        layer_idx,
                        mask_idx,
                        target_idx,
                        batch_weights,
                        batch_size,
                        transformer_layers_attr,
                        ff_attrs
                    )
                integrated_grads_this_step.append(grad_w)

            # then sum over batches, and multiply by W-hat / m — the
            # Riemann-sum normalisation of the integrated-gradients estimate
            integrated_grads_this_step_ = torch.stack(integrated_grads_this_step, dim=0).sum(dim=0)
            integrated_grads_this_step_ *= baseline_activations.squeeze(0) / steps

            return integrated_grads_this_step_, prompt

        integrated_grads = []
        for i in range(n_sampling_steps):
            try:
                integrated_grad, prompt = sample_integrated_grads(i, prompt)
                if scal_gas:  # scale the attribution score for long unstructured answers
                    integrated_grad = integrated_grad/np.sqrt(i+1)
                integrated_grads.append(integrated_grad)
            except torch.cuda.OutOfMemoryError as e:
                print(f'sampling_steps:{i}')
                print(f'Error:get_scores_for_layer->sample_integrated_grads {e}')
                # FIX: if the very first step OOMs there is nothing to
                # average — re-raise instead of crashing on an empty stack
                if not integrated_grads:
                    raise
                # stop sampling on OOM and average the steps completed so far
                integrated_grads = torch.stack(integrated_grads, dim=0).sum(dim=0) / len(integrated_grads)
                return integrated_grads

        integrated_grads = torch.stack(integrated_grads, dim=0).sum(dim=0) / n_sampling_steps  # average over the sampled tokens

        return integrated_grads

    def modify_activations(
        self,
        prompt: str,
        ground_truth: str,
        neurons: List[List[int]],
        mode: str = "suppress",
        undo_modification: bool = True,
        quiet: bool = False,
    ) -> Tuple[dict, Callable]:
        """Patch the FF activations of `neurons` ([layer, position] pairs) in
        `mode` and measure the effect on `ground_truth`'s probability.

        Returns `(results_dict, unpatch_fn)` with before/after probabilities;
        when `undo_modification` the patches are already reverted and
        `unpatch_fn` is a no-op.
        """
        results_dict = {}
        _, mask_idx, _ = self._prepare_inputs(
            prompt, ground_truth
        )  # just need to get the mask index for later - probably a better way to do this
        # get the baseline probabilities of the groundtruth being generated + the argmax / greedy completion before modifying the activations
        (
            gt_baseline_prob,
            argmax_baseline_prob,
            argmax_completion_str,
            _,
        ) = self._generate(prompt, ground_truth)
        if not quiet:
            print(
                f"\nBefore modification - groundtruth probability: {gt_baseline_prob}\nArgmax completion: `{argmax_completion_str}`\nArgmax prob: {argmax_baseline_prob}\n"
            )
        results_dict["before"] = {
            "gt_prob": gt_baseline_prob,
            "argmax_completion": argmax_completion_str,
            "argmax_prob": argmax_baseline_prob,
        }

        # patch model to suppress neurons
        # store all the layers we patch so we can unpatch them later
        all_layers = {n[0] for n in neurons}

        patch_ff_layer(
            self.model,
            mask_idx,
            mode=mode,
            neurons=neurons,
            transformer_layers_attr=self.transformer_layers_attr,
            ff_attrs=self.input_ff_attr,
        )

        # get the probabilities of the groundtruth being generated + the argmax / greedy completion after modifying the activations
        new_gt_prob, new_argmax_prob, new_argmax_completion_str, _ = self._generate(
            prompt, ground_truth
        )
        if not quiet:
            print(
                f"\nAfter modification - groundtruth probability: {new_gt_prob}\nArgmax completion: `{new_argmax_completion_str}`\nArgmax prob: {new_argmax_prob}\n"
            )
        results_dict["after"] = {
            "gt_prob": new_gt_prob,
            "argmax_completion": new_argmax_completion_str,
            "argmax_prob": new_argmax_prob,
        }

        unpatch_fn = partial(
            unpatch_ff_layers,
            model=self.model,
            layer_indices=all_layers,
            transformer_layers_attr=self.transformer_layers_attr,
            ff_attrs=self.input_ff_attr,
        )

        if undo_modification:
            unpatch_fn()
            unpatch_fn = lambda *args: args  # no-op: already unpatched

        return results_dict, unpatch_fn

    def suppress_knowledge(
        self,
        prompt: str,
        ground_truth: str,
        neurons: List[List[int]],
        undo_modification: bool = True,
        quiet: bool = False,
    ) -> Tuple[dict, Callable]:
        """
        prompt the model with `prompt`, zeroing the activations at the positions specified by `neurons`,
        and measure the resulting affect on the ground truth probability.
        """
        return self.modify_activations(
            prompt=prompt,
            ground_truth=ground_truth,
            neurons=neurons,
            mode="suppress",
            undo_modification=undo_modification,
            quiet=quiet,
        )

    def enhance_knowledge(
        self,
        prompt: str,
        ground_truth: str,
        neurons: List[List[int]],
        undo_modification: bool = True,
        quiet: bool = False,
    ) -> Tuple[dict, Callable]:
        """
        prompt the model with `prompt`, multiplying the activations at the positions
        specified by `neurons` by 2, and measure the resulting affect on the ground truth probability.
        """
        return self.modify_activations(
            prompt=prompt,
            ground_truth=ground_truth,
            neurons=neurons,
            mode="enhance",
            undo_modification=undo_modification,
            quiet=quiet,
        )

    @torch.no_grad()
    def modify_weights(
        self,
        prompt: str,
        neurons: List[List[int]],
        target: str,
        mode: str = "edit",
        erase_value: str = "zero",
        undo_modification: bool = True,
        quiet: bool = False,
    ) -> Tuple[dict, Callable]:
        """
        Update the *weights* of the neural net in the positions specified by `neurons`.
        Specifically, the weights of the second Linear layer in the ff are updated by adding or subtracting the value
        of the word embeddings for `target`.

        `mode`: "edit" rewrites toward `target` (bert only); "erase" zeroes the
            weights or sets them to the unk-token embedding.
        Returns `(results_dict, unpatch_fn)` with before/after probabilities.
        """
        assert mode in ["edit", "erase"]
        assert erase_value in ["zero", "unk"]
        results_dict = {}

        _, _, target_label = self._prepare_inputs(prompt, target)
        # get the baseline probabilities of the target being generated + the argmax / greedy completion before modifying the weights
        (
            gt_baseline_prob,
            argmax_baseline_prob,
            argmax_completion_str,
            argmax_tokens,
        ) = self._generate(prompt, target)
        if not quiet:
            print(
                f"\nBefore modification - groundtruth probability: {gt_baseline_prob}\nArgmax completion: `{argmax_completion_str}`\nArgmax prob: {argmax_baseline_prob}"
            )
        results_dict["before"] = {
            "gt_prob": gt_baseline_prob,
            "argmax_completion": argmax_completion_str,
            "argmax_prob": argmax_baseline_prob,
        }

        # get the word embedding values of the baseline + target predictions
        word_embeddings_weights = self._get_word_embeddings()
        if mode == "edit":
            assert (
                self.model_type == "bert"
            ), "edit mode currently only working for bert models - TODO"
            original_prediction_id = argmax_tokens[0]
            original_prediction_embedding = word_embeddings_weights[
                original_prediction_id
            ]
            target_embedding = word_embeddings_weights[target_label]

        if erase_value == "zero":
            erase_value = 0
        else:
            assert self.model_type == "bert", "GPT models don't have an unk token"
            erase_value = word_embeddings_weights[self.unk_token]

        # modify the weights by subtracting the original prediction's word embedding
        # and adding the target embedding
        original_weight_values = []  # to reverse the action later
        for layer_idx, position in neurons:
            output_ff_weights = self._get_output_ff_layer(layer_idx)
            if self.model_type == "gpt2":
                # since gpt2 uses a conv1d layer instead of a linear layer in the ff block, the weights are in a different format
                original_weight_values.append(
                    output_ff_weights[position, :].detach().clone()
                )
            else:
                original_weight_values.append(
                    output_ff_weights[:, position].detach().clone()
                )
            if mode == "edit":
                if self.model_type == "gpt2":
                    output_ff_weights[position, :] -= original_prediction_embedding * 2
                    output_ff_weights[position, :] += target_embedding * 2
                else:
                    output_ff_weights[:, position] -= original_prediction_embedding * 2
                    output_ff_weights[:, position] += target_embedding * 2
            else:
                if self.model_type == "gpt2":
                    output_ff_weights[position, :] = erase_value
                else:
                    output_ff_weights[:, position] = erase_value

        # get the probabilities of the target being generated + the argmax / greedy completion after modifying the weights
        (
            new_gt_prob,
            new_argmax_prob,
            new_argmax_completion_str,
            new_argmax_tokens,
        ) = self._generate(prompt, target)
        if not quiet:
            print(
                f"\nAfter modification - groundtruth probability: {new_gt_prob}\nArgmax completion: `{new_argmax_completion_str}`\nArgmax prob: {new_argmax_prob}"
            )
        results_dict["after"] = {
            "gt_prob": new_gt_prob,
            "argmax_completion": new_argmax_completion_str,
            "argmax_prob": new_argmax_prob,
        }

        def unpatch_fn():
            # reverse modified weights
            for idx, (layer_idx, position) in enumerate(neurons):
                output_ff_weights = self._get_output_ff_layer(layer_idx)
                if self.model_type == "gpt2":
                    output_ff_weights[position, :] = original_weight_values[idx]
                else:
                    output_ff_weights[:, position] = original_weight_values[idx]

        if undo_modification:
            unpatch_fn()
            unpatch_fn = lambda *args: args  # no-op: already restored

        return results_dict, unpatch_fn

    def edit_knowledge(
        self,
        prompt: str,
        target: str,
        neurons: List[List[int]],
        undo_modification: bool = True,
        quiet: bool = False,
    ) -> Tuple[dict, Callable]:
        """Rewrite the FF output weights at `neurons` toward `target`
        (bert only); see modify_weights."""
        return self.modify_weights(
            prompt=prompt,
            neurons=neurons,
            target=target,
            mode="edit",
            undo_modification=undo_modification,
            quiet=quiet,
        )

    def erase_knowledge(
        self,
        prompt: str,
        neurons: List[List[int]],
        erase_value: str = "zero",
        target: Optional[str] = None,
        undo_modification: bool = True,
        quiet: bool = False,
    ) -> Tuple[dict, Callable]:
        """Erase the FF output weights at `neurons` (set to zero or to the
        unk-token embedding); see modify_weights."""
        return self.modify_weights(
            prompt=prompt,
            neurons=neurons,
            target=target,
            mode="erase",
            erase_value=erase_value,
            undo_modification=undo_modification,
            quiet=quiet,
        )
