from transformers import AutoModelForCausalLM, AutoTokenizer,DataCollatorWithPadding
from torch.utils.data import DataLoader
import torch
import datasets
import tqdm
import json
import sys
# Load metric
from metrics import compute_metrics,compute_metrics_self
from test_list_right import check_lists

def create_messages(prompt):
    """Wrap a single user prompt in the chat-message format.

    Returns a one-element list holding the user-role message dict
    expected by ``tokenizer.apply_chat_template``.
    """
    user_message = {"role": "user", "content": prompt}
    return [user_message]

def batch_generate(instruction, tokenizer, model, max_new_tokens=512):
    """Generate one completion per prompt.

    Despite the name, prompts are processed one at a time: each prompt is
    wrapped as a single user chat message, run through ``model.generate``,
    and only the newly generated tokens (everything after the prompt) are
    decoded.  Newlines, square brackets and single quotes are stripped
    from each completion before it is collected.

    NOTE(review): assumes ``tokenizer.apply_chat_template`` returns a
    (1, prompt_len) tensor — confirm for the tokenizer in use.
    """
    completions = []

    for prompt in instruction:
        chat = [{"role": "user", "content": prompt}]
        prompt_ids = tokenizer.apply_chat_template(
            chat,
            add_generation_prompt=True,
            return_tensors="pt",
        )
        generated = model.generate(
            prompt_ids.to(model.device),
            pad_token_id=tokenizer.eos_token_id,
            max_new_tokens=max_new_tokens,
        )
        # Decode only the tokens produced after the prompt.
        text = tokenizer.decode(
            generated[0][prompt_ids.shape[1]:], skip_special_tokens=True
        )
        for unwanted in ("\n", "[", "]", "'"):
            text = text.replace(unwanted, "")
        completions.append(text)

    return completions


def eval_llm(model_dir):
    """Evaluate a causal LM on the zhongyao eval set and report metrics.

    Loads the tokenizer and model from ``model_dir``, generates a
    completion for every instruction in the first ``test_num`` rows of the
    eval jsonl, writes the completions to ``zhongyao_singledata-llm-7b.txt``
    (one per line), prints ``compute_metrics`` over (hypotheses,
    references), and returns the ``(hypotheses, references)`` pair.

    Args:
        model_dir: path or hub id of the model/tokenizer to evaluate.

    Returns:
        Tuple ``(hypotheses, references)`` of equal-length string lists.
    """
    tokenizer = AutoTokenizer.from_pretrained(
        model_dir, device_map="auto", padding_side="left", trust_remote_code=True
    )
    # float16 halves memory vs. the default float32 load and avoids OOM on
    # smaller GPUs.  For even tighter budgets, bitsandbytes 8-/4-bit loading
    # (load_in_8bit=True / load_in_4bit=True) can be used instead.
    model = AutoModelForCausalLM.from_pretrained(
        model_dir,
        device_map="auto",
        trust_remote_code=True,
        pad_token_id=tokenizer.eos_token_id,
        torch_dtype=torch.float16,
    )
    model = model.eval()

    test_num = 5000   # number of eval rows to score
    batch_size = 16

    # The split expression already limits the dataset to test_num rows, so
    # no further slicing of dataset['output'] is needed.
    dataset = datasets.load_dataset(
        "json",
        data_files="./datasets/zhongyao_eval_5k_without_prompt.jsonl",
        split=f"train[:{test_num}]",
    )
    references = dataset["output"]
    hypotheses = []

    # Default collation is fine here: each batch is a dict of string lists.
    dataloader = DataLoader(dataset, batch_size=batch_size)
    for batch in tqdm.tqdm(dataloader):
        docode_res = batch_generate(batch["instruction"], tokenizer, model)
        print('docode_res is ', docode_res)
        hypotheses.extend(docode_res)

    # Explicit utf-8 so non-ASCII completions don't crash on platforms whose
    # default locale encoding can't represent them.
    with open("zhongyao_singledata-llm-7b.txt", "w", encoding="utf-8") as file:
        for item in hypotheses:
            file.write(f"{item}\n")  # one hypothesis per line
    print(compute_metrics((hypotheses, references)))
    return hypotheses, references
def extend_list(original_list, target_length):
    """Repeat *original_list* cyclically until it has *target_length* items.

    Args:
        original_list: non-empty list whose elements are recycled.
        target_length: desired length of the result (0 gives ``[]``).

    Returns:
        A new list of exactly ``target_length`` items.

    Raises:
        ValueError: if ``original_list`` is empty (previously this crashed
            with an opaque ``ZeroDivisionError``).
    """
    if not original_list:
        raise ValueError("original_list must be non-empty")
    # Ceiling division: number of full copies needed to reach target_length.
    repeat_times = (target_length + len(original_list) - 1) // len(original_list)
    return (original_list * repeat_times)[:target_length]

if __name__ == '__main__':
    model_dir_ori = 'deepseek-llm-7b-chat'
    model_dir_me = '/root/autodl-tmp/merge_ori_lora_llm/zhongyao_singledata-2048-16-32-epoch-2-lora_rank-64'
    eval_list = [model_dir_me, model_dir_ori]
    # NOTE(review): the range starts at 1, so only the base model
    # (eval_list[1]) is evaluated and the LoRA-merged model at index 0 is
    # skipped — confirm this is intentional.
    for i in range(1, len(eval_list)):
        print('now eval llm model is ', eval_list[i])
        hypotheses, references = eval_llm(eval_list[i])
        # Self-computed metrics in addition to the ones eval_llm prints.
        res = compute_metrics_self(hypotheses, references)
        print(res)