import json
from tqdm import tqdm
import argparse
import os
import sys
sys.path.append("..")
import torch
from knowledge_neurons import (
    KnowledgeNeurons,
    initialize_model_and_tokenizer,
    model_type,
)

def _select_ground_truth(data, data_type):
    """Pick the ground-truth answer string for one dataset sample.

    'counterfact' stores it as a plain string and 'zsre' as a list (first
    element used); 'CKnowEdit' and 'recent' carry no separate ground truth,
    so the edited target is used instead.
    """
    if data_type == 'counterfact':
        return data['ground_truth']
    if data_type == 'zsre':
        return data['ground_truth'][0]
    # 'CKnowEdit' / 'recent': fall back to the edit target.
    return data['target_new']


def _select_answer(prob_type, target_new, ground_truth):
    """Choose which answer string token probabilities are computed for.

    For 'llm_answer', the longer of target_new / ground_truth (by
    whitespace-token count) is used.
    TODO: why prefer the longer answer?  (question kept from original)
    """
    if prob_type == 'target_new':
        return target_new
    if prob_type == 'ground_truth':
        return ground_truth
    # 'llm_answer'
    n1, n2 = len(target_new.split(' ')), len(ground_truth.split(' '))
    return target_new if n1 > n2 else ground_truth


def main():
    """Compute per-layer attribution scores (integrated-gradients style) for
    each prompt in an editing dataset and stream them to a JSON-lines file.

    Samples that run out of GPU memory are dumped to a separate file so they
    can be re-run later with a smaller batch size.
    """
    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('--data_path', type=str, 
                        default='../../EasyEdit/dataset/KnowEdit-ms/benchmark_wiki_counterfact_test_cf.json',
                        help='path to data file')
    parser.add_argument('--data_type', type=str, default='counterfact', 
                        choices=['counterfact', 'zsre', 'CKnowEdit', 'recent'],
                        help='data type counterfact or zsre')
    parser.add_argument('--model_name', type=str, default='Meta-Llama-3-8B-Instruct', help='model name')
    parser.add_argument('--start_idx', type=int, default=None, help='start index, default 0')
    parser.add_argument('--end_idx', type=int, default=None, help='end index')
    parser.add_argument('--batch_size', type=int, default=20, help='batch size')
    parser.add_argument('--transformer_layers_attr', type=str, default='', help='transformer layer name')
    parser.add_argument('--ff_attrs', type=str, default='', help='ff layer name')
    parser.add_argument('--use_prompt', action='store_true', help='if use prompt to get gas')
    parser.add_argument('--use_generate_prompt', action='store_true', help='if use generate prompts')
    parser.add_argument('--prob_type', type=str, default='ground_truth', 
                        help='获取模型输出token的概率,可选值: ground_truth, target_new, llm_answer', 
                        choices=['ground_truth', 'target_new', 'llm_answer'])
    parser.add_argument('--layer_idx', type=str, default='0,32', help='model layer index format: start_idx,end_idx')
    parser.add_argument('--steps', type=int, default=20, help='steps for gradient ascent')
    parser.add_argument('--next_token', type=str, default='answer_next_token', 
                        choices=['answer_next_token', 'argmax_next_token'],
                        help='next token for generate prompt')
    parser.add_argument('--max_sample_steps', type=int, default=320, help='max sample steps for gradient ascent')
    parser.add_argument('--max_tokens', type=int, default=128, help='max tokens for generate prompt')
    parser.add_argument('--cache_dir', type=str, default='./', help='cache dir')

    args = parser.parse_args()
    end_idx, start_idx, batch_size = args.end_idx, args.start_idx, args.batch_size
    data_path, data_type = args.data_path, args.data_type
    transformer_layers_attr, ff_attrs = args.transformer_layers_attr, args.ff_attrs
    use_prompt, use_generate_prompt = args.use_prompt, args.use_generate_prompt
    prob_type, steps = args.prob_type, args.steps
    layer_idx_list = [int(i) for i in args.layer_idx.split(',')]
    max_sample_steps = args.max_sample_steps
    next_token = args.next_token

    model_name, torch_dtype = args.model_name, torch.bfloat16
    huggingface_cache = os.environ.get('HUGGINGFACE_CACHE')
    if huggingface_cache is None:
        # Fail early with a clear message instead of a TypeError on the concat below.
        raise EnvironmentError('HUGGINGFACE_CACHE environment variable is not set')
    # NOTE: plain concatenation kept for backward compatibility — the env var
    # is expected to include its trailing path separator.
    model_path = huggingface_cache + model_name
    model, tokenizer = initialize_model_and_tokenizer(model_path, torch_dtype=torch_dtype)
    kn = KnowledgeNeurons(model, tokenizer, model_type=model_type(model_path))

    gas_save_path = f'gas/{model_name}_{data_type}_{next_token}_{prob_type}/{ff_attrs}'
    gas_save_path = os.path.join(args.cache_dir, gas_save_path)
    os.makedirs(gas_save_path, exist_ok=True)
    # Save samples that hit OOM so they can be re-run later.
    oom_save_path = f'oom/{model_name}_{data_type}_{next_token}_{prob_type}/{ff_attrs}'
    oom_save_path = os.path.join(args.cache_dir, oom_save_path)
    os.makedirs(oom_save_path, exist_ok=True)

    if start_idx is not None and end_idx is not None:
        gas_path = f'{gas_save_path}/{start_idx}-{end_idx}-{model_name}-{args.layer_idx}'
        oom_path = f'{oom_save_path}/{start_idx}-{end_idx}-{model_name}-{args.layer_idx}.json'
    else: 
        gas_path = f'{gas_save_path}/all-{model_name}-{args.layer_idx}'
        oom_path = f'{oom_save_path}/all-{model_name}-{args.layer_idx}.json'
    # Base models do not use an instruction prompt for now.
    if use_prompt:
        gas_path += '-use-prompt.json'
    else:
        gas_path += '.json'

    with open(data_path, 'r', encoding='utf-8') as f1, \
        open(gas_path, 'w', encoding='utf-8') as f2, \
        open(oom_path, 'w', encoding='utf-8') as f3:
        data_list = json.load(f1)
        if start_idx is not None and end_idx is not None:
            data_list = data_list[start_idx:end_idx]
        # Wrapping the list (not the enumerate iterator) lets tqdm show a total.
        for idx, data in enumerate(tqdm(data_list)):
            target_new = data['target_new']
            ground_truth = _select_ground_truth(data, data_type)

            query_list = [data['prompt']]
            if use_generate_prompt:
                if 'generate_prompt' in data:
                    query_list.extend(data['generate_prompt'])
                else:
                    raise ValueError(f'Warning:you want to use generate_prompt, but generate_prompt key not in {data_type} dataset')

            answer = _select_answer(prob_type, target_new, ground_truth)

            # Handle OOM: log the offending sample and move on to the next one.
            try:
                for i, query in enumerate(query_list):
                    if use_prompt:
                        # prompt = f"Please answer the question in no more than {answer_len} words!\nQuestion:{query}\nAnswer:"
                        raise NotImplementedError('暂不支持instruction prompt')
                    else:
                        prompt = query

                    print(f'\n{idx}-{i} {"="*100}')
                    print(prompt)
                    print(answer, len(answer.split(' ')))

                    encoded_input = kn.tokenizer(prompt, return_tensors="pt").to(kn.device)
                    layers_scores_list = []
                    for layer_idx in range(layer_idx_list[0], layer_idx_list[1]):
                        layer_scores = kn.get_scores_for_layer(
                            prompt,
                            answer,
                            encoded_input=encoded_input,
                            layer_idx=layer_idx,
                            batch_size=batch_size,
                            steps=steps,  # the knowledge-neurons paper uses 20
                            transformer_layers_attr=transformer_layers_attr,
                            ff_attrs=ff_attrs,
                            prob_type=prob_type,
                            next_token=next_token,
                            max_sample_steps=max_sample_steps,
                        )
                        layers_scores_list.append(layer_scores.tolist())
                    # One JSON line per prompt, flushed so partial runs are usable.
                    json.dump(layers_scores_list, f2)
                    f2.write('\n')
                    f2.flush()
            except torch.cuda.OutOfMemoryError:
                torch.cuda.empty_cache()
                print(f'<<<{idx}-{i}>>>')
                print(prompt)
                print(ground_truth)
                json.dump(data, f3, ensure_ascii=False)
                f3.write('\n')
                f3.flush()

# Run the CLI entry point only when executed as a script, not on import.
if __name__ == '__main__':
    main()