import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "3"
import torch
from tqdm import tqdm
import json
import sys
sys.path.append(os.getcwd())
from vllm import LLM, SamplingParams
from vllm.lora.request import LoRARequest


def extract_answer(text: str) -> str:
    """Map a model response to a binary label.

    Returns "否" (no) when the text contains a negative marker,
    otherwise defaults to "是" (yes).
    """
    negative_markers = ("否", "不是")
    return "否" if any(marker in text for marker in negative_markers) else "是"

def run_test(data_path, output_path, llm):
    """Run inference over a JSONL test set and append one result per line to output_path.

    Each input line is a JSON object with a 'conversation_id' and a
    'conversation' list whose first turn has 'human' and 'assistant' fields.
    Records whose conversation_id already appears in output_path are skipped,
    so the run is resumable.

    Args:
        data_path: path to the JSONL test file.
        output_path: path to the JSONL output file (appended to).
        llm: a vllm.LLM instance used via llm.chat(...).
    """
    index_name = 'conversation_id'

    # Read the test set.
    with open(data_path, 'r', encoding='utf8') as f:
        data_list = f.readlines()

    # Collect already-solved ids so an interrupted run can resume.
    # json.loads (not eval): the lines are JSON written by json.dumps,
    # and eval would execute arbitrary code from the file.
    # A set gives O(1) membership tests in the loop below.
    solved_ids = set()
    if os.path.exists(output_path):
        with open(output_path, 'r', encoding='utf8') as f:
            solved_ids = {json.loads(line)[index_name] for line in f}

    os.makedirs(os.path.dirname(output_path), exist_ok=True)

    # Sampling configuration is loop-invariant; build it once.
    sampling_params = SamplingParams(
        temperature = 0.8,
        top_p = 0.95,
        max_tokens = 1024,
    )

    # Append results as JSONL; 'with' guarantees the handle is closed
    # even if inference raises part-way through.
    with open(output_path, "a+", encoding="utf-8") as f:
        for line in tqdm(data_list):
            data = json.loads(line)
            if data[index_name] in solved_ids:
                continue
            user_prompt = data['conversation'][0]['human'] + "只输出“是”或“否”，不要输出其他内容。"
            message = [
                {"role" : "user", "content" : user_prompt},
            ]

            output = llm.chat(
                message,
                sampling_params = sampling_params,
                use_tqdm = False
            )[0].outputs[0].text

            data['label'] = data['conversation'][0]['assistant']
            data['output'] = output
            try:
                data['pred'] = extract_answer(output)
            except Exception:
                # Best-effort fallback: default to the positive label.
                data['pred'] = '是'
            f.write(json.dumps(data, ensure_ascii=False) + "\n")
            f.flush()  # flush the buffer to disk so progress survives a crash

    print(f'最大显存使用量：{round(torch.cuda.max_memory_allocated() / (1024 ** 3), 2)} G')

def main(model_name, task_name):
    """Load the named model with vLLM and evaluate it on the task's test split.

    Reads data/instruct_data/{task_name}/test.jsonl and writes predictions
    to output/{model_name}/{task_name}_prompt.jsonl.
    """
    engine = LLM(
        model=f"model/{model_name}",
        task="generate",
        gpu_memory_utilization=0.9,
        trust_remote_code=True,
    )
    run_test(
        f'data/instruct_data/{task_name}/test.jsonl',
        f'output/{model_name}/{task_name}_prompt.jsonl',
        engine,
    )

if __name__ == '__main__':
    # Default evaluation target; edit these to run a different model/task.
    main(model_name="Qwen2.5-7B-Instruct", task_name='deie')