import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
import time
from transformers import AutoTokenizer, AutoConfig, AddedToken
from vllm import LLM, SamplingParams
from vllm.lora.request import LoRARequest
import torch
from loguru import logger
from tqdm import tqdm
import json
import sys
import multiprocessing
sys.path.append(os.getcwd())

from component.template import template_dict

import argparse



def build_prompt(tokenizer, template, query, history, system=None):
    """Build input token ids for one conversation turn.

    Concatenates (optional) system text, the prior conversation turns and the
    new user query, each rendered through the template's format strings, then
    tokenizes each piece without special tokens.

    Args:
        tokenizer: a tokenizer exposing ``encode(text, add_special_tokens=...)``
            and ``eos_token``.
        template: template object with ``system_format`` / ``user_format`` /
            ``assistant_format`` / ``system`` attributes.
        query: the new user message (appended after ``history``).
        history: list of ``{'role': ..., 'message': ...}`` dicts. Not mutated.
        system: optional system prompt; falls back to ``template.system``.

    Returns:
        ``torch.LongTensor`` of shape ``(1, seq_len)``.
    """
    system_format = template.system_format
    user_format = template.user_format
    assistant_format = template.assistant_format
    system = system if system is not None else template.system

    # Work on a copy: the original appended to the caller's list, so repeated
    # calls with a shared history accumulated duplicate query turns.
    turns = history + [{"role": 'user', 'message': query}]

    input_ids = []
    # System text is only emitted when both a format and a system prompt exist.
    if system_format is not None and system is not None:
        system_text = system_format.format(content=system)
        input_ids = tokenizer.encode(system_text, add_special_tokens=False)

    # Concatenate the conversation turns in order.
    for item in turns:
        role, message = item['role'], item['message']
        if role == 'user':
            message = user_format.format(content=message, stop_token=tokenizer.eos_token)
        else:
            message = assistant_format.format(content=message, stop_token=tokenizer.eos_token)
        input_ids += tokenizer.encode(message, add_special_tokens=False)

    return torch.tensor([input_ids], dtype=torch.long)


def load_tokenizer(model_name_or_path):
    """Load a slow tokenizer and normalize its special-token setup.

    QWen tokenizers ship without the usual pad/bos/eos tokens, so all three
    are mapped to the ``eod`` id; any tokenizer lacking a pad token falls
    back to its eos token.
    """
    tokenizer = AutoTokenizer.from_pretrained(
        model_name_or_path,
        trust_remote_code=True,
        use_fast=False,  # llama does not support the fast tokenizer
    )

    # QWen: route every special-token id to the end-of-document id.
    if tokenizer.__class__.__name__ == 'QWenTokenizer':
        eod = tokenizer.eod_id
        tokenizer.pad_token_id = eod
        tokenizer.bos_token_id = eod
        tokenizer.eos_token_id = eod

    # Generic fallback so downstream padding never sees a missing pad token.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    return tokenizer

def load(engine, model_name_or_path, adapter_name_or_path, gpu_ids=None, load_in_4bit=True):
    """Load an inference backend.

    Args:
        engine: ``"transformers"`` for a quantized HF model + tokenizer,
            anything else for a vLLM engine with LoRA support enabled.
        model_name_or_path: base model path or hub id.
        adapter_name_or_path: LoRA adapter path (may be None).
        gpu_ids: optional list of GPU indices. For vLLM this sets
            ``CUDA_VISIBLE_DEVICES`` and the tensor-parallel size; when None,
            all visible GPUs are used.
        load_in_4bit: transformers engine only — 4-bit inference saves a lot
            of GPU memory at a possible small quality cost (was previously
            hard-coded to True; default preserved).

    Returns:
        ``(model, tokenizer)``; tokenizer is None for the vLLM engine
        (vLLM handles tokenization internally).
    """
    logger.info(f'Loading model from: {model_name_or_path}')
    logger.info(f'adapter_name_or_path: {adapter_name_or_path}')

    if engine == "transformers":
        # Local import keeps the vLLM path free of this project dependency.
        from component.utils import ModelUtils
        model = ModelUtils.load_model(
            model_name_or_path,
            load_in_4bit=load_in_4bit,
            adapter_name_or_path=adapter_name_or_path
        ).eval()

        tokenizer = load_tokenizer(model_name_or_path)
    else:
        # Must be set before vLLM initializes CUDA.
        if gpu_ids:
            os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(map(str, gpu_ids))

        gpu_num = len(gpu_ids) if gpu_ids else torch.cuda.device_count()
        model = LLM(model=model_name_or_path, task="generate", gpu_memory_utilization=0.9, tensor_parallel_size=gpu_num, trust_remote_code=True, enable_lora=True, max_lora_rank=64)
        tokenizer = None
    return model, tokenizer

def run_test(data_path, output_path, model, tokenizer, index_name='id'):
    """Run transformers-engine inference over a jsonl test set, with resume.

    Reads ``data_path`` (one JSON object per line, with a
    ``conversation[0]['human']`` query), generates a prediction per line and
    appends ``{..., 'label', 'pred'}`` records to ``output_path``. Records
    whose ``index_name`` already appears in the output file are skipped, so
    an interrupted run can be restarted.
    """
    template = template_dict['qwen2.5']

    # Generation hyper-parameters.
    max_new_tokens = 2048
    top_p = 0.9
    temperature = 0.35
    repetition_penalty = 1.5

    if template.stop_word is None:
        template.stop_word = tokenizer.eos_token
    stop_token_id = tokenizer.encode(template.stop_word, add_special_tokens=False)
    assert len(stop_token_id) == 1
    stop_token_id = stop_token_id[0]

    # Read the test set.
    with open(data_path, 'r', encoding='utf8') as f:
        data_list = f.readlines()

    # Resume support: a set gives O(1) membership checks (was a list).
    solved_ids = set()
    if os.path.exists(output_path):
        with open(output_path, 'r', encoding='utf8') as f:
            # json.loads replaces eval(): safe on untrusted lines and faster.
            solved_ids = {json.loads(line)[index_name] for line in f}

    os.makedirs(os.path.dirname(output_path), exist_ok=True)

    # Append results as jsonl, flushing per record so progress survives crashes.
    with open(output_path, "a+", encoding="utf-8") as out_f:
        for line in tqdm(data_list):
            data = json.loads(line)
            if data[index_name] in solved_ids:
                continue
            query = data['conversation'][0]['human'].strip()
            input_ids = build_prompt(tokenizer, template, query, history=[], system=None).to(model.device)
            attention_mask = torch.ones_like(input_ids)
            with torch.no_grad():
                outputs = model.generate(
                        input_ids=input_ids,
                        attention_mask=attention_mask,
                        max_new_tokens=max_new_tokens,
                        do_sample=True,
                        top_p=top_p, temperature=temperature, repetition_penalty=repetition_penalty,
                        eos_token_id=stop_token_id,
                        pad_token_id=tokenizer.pad_token_id
                )
            # Drop the prompt tokens, keep only the generated continuation.
            outputs = outputs.tolist()[0][len(input_ids[0]):]
            response = tokenizer.decode(outputs)
            response = response.strip().replace(template.stop_word, "").strip()

            data['label'] = data['conversation'][0]['assistant']
            data['pred'] = response
            out_f.write(json.dumps(data, ensure_ascii=False) + "\n")
            out_f.flush()  # flush the buffer to disk

    print(f'最大显存使用量：{round(torch.cuda.max_memory_allocated() / (1024 ** 3), 2)} G')

def run_vllm_process(data_slice, output_path, model_name, adapter_path, gpu_ids, task_name, index_name='id'):
    """Worker entry point: run vLLM + LoRA inference over one data slice.

    Loads a vLLM engine pinned to ``gpu_ids``, reads ``data_slice`` (jsonl)
    and appends ``{..., 'pred'}`` records to ``output_path`` one line at a
    time, flushing after each record.
    """
    # Initialize the vLLM engine on this process's GPUs.
    model, _ = load("vllm", f'model/{model_name}', adapter_path, gpu_ids)

    # Read this worker's slice of the test set.
    with open(data_slice, 'r', encoding='utf8') as f:
        data_list = f.readlines()

    os.makedirs(os.path.dirname(output_path), exist_ok=True)
    template = template_dict['qwen3']

    # Sampling config is loop-invariant; build it once.
    sampling_params = SamplingParams(
        temperature=0.9,
        top_p=0.9,
        max_tokens=4096,
    )

    # Append predictions as jsonl.
    with open(output_path, "a+", encoding="utf-8") as f:
        for line in tqdm(data_list, desc=f"Process {gpu_ids}"):
            # json.loads replaces eval(): safe on untrusted lines and faster.
            data = json.loads(line)
            # Per-record system prompt overrides the template default.
            if 'system' in data['conversation'][0].keys():
                system = data['conversation'][0]['system']
            else:
                system = template.system
            query = data['conversation'][0]['human'].strip()

            message = [
                {"role": "system", "content": system},
                {"role": "user", "content": query},
            ]

            response = model.chat(
                message,
                sampling_params=sampling_params,
                lora_request=LoRARequest(task_name, 1, adapter_path),
                use_tqdm=False
            )[0].outputs[0].text

            data['pred'] = response
            f.write(json.dumps(data, ensure_ascii=False) + "\n")
            f.flush()

    print(f'最大显存使用量：{round(torch.cuda.max_memory_allocated() / (1024 ** 3), 2)} G')

def split_data(data_path, output_path, num_parts, index_name='id'):
    """Split the unsolved lines of a jsonl file into ``num_parts`` chunks.

    Lines whose ``index_name`` value already appears in ``output_path`` are
    filtered out (resume support). The remaining lines are distributed as
    evenly as possible — the previous floor-division slicing silently dropped
    the last ``len(data) % num_parts`` lines.

    Returns:
        A list of ``num_parts`` lists of raw jsonl lines (some may be empty).
    """
    # Collect already-solved ids from a previous run, if any.
    solved_ids = set()
    if os.path.exists(output_path):
        with open(output_path, 'r', encoding='utf8') as f:
            # json.loads replaces eval(): safe on untrusted lines and faster.
            solved_ids = {json.loads(line)[index_name] for line in f}

    with open(data_path, 'r', encoding='utf8') as f:
        data = [line for line in f if json.loads(line)[index_name] not in solved_ids]

    # Even split: the first `extra` chunks get one additional line each.
    base, extra = divmod(len(data), num_parts)
    parts = []
    start = 0
    for i in range(num_parts):
        size = base + (1 if i < extra else 0)
        parts.append(data[start:start + size])
        start += size
    return parts


def main():
    """Split the test set, fan out vLLM workers across GPU groups, merge.

    Reads the module-level config globals (``model_name``, ``task_name``,
    ``adpter_version``, ``index_name``, ``step``) set in the ``__main__``
    block. Each worker writes its own part file; results are concatenated
    into the final output at the end.
    """
    # Number of worker processes / data parts. Previously hard-coded as a
    # literal 2 in both the split and the merge loop.
    num_parts = 2

    if step:
        adapter_name = f'adapter/qlora/{model_name}/{adpter_version}/checkpoint-{step}'
    else:
        adapter_name = f'adapter/qlora/{model_name}/{adpter_version}'

    data_dir = f'data/instruct_data/{task_name}'
    data_path = f'data/instruct_data/{task_name}/test.jsonl'
    output_dir = f'output/{model_name}'
    output_path = f'output/{model_name}/{adpter_version}{step}.jsonl'

    # Split unsolved data into one slice per worker.
    data_parts = split_data(data_path, output_path, num_parts, index_name)

    # GPUs assigned to each worker process.
    gpu_groups = [[0, 1, 2, 3], [4, 5, 6, 7]]

    processes = []
    for i, (data_part, gpu_ids) in enumerate(zip(data_parts, gpu_groups)):
        if not data_part:  # nothing to process for this worker
            continue

        # Each worker reads its slice from a temporary jsonl file.
        temp_data_path = f"{data_dir}/{adpter_version}{step}_temp_part_{i}.jsonl"
        with open(temp_data_path, 'w', encoding='utf8') as f:
            f.writelines(data_part)

        # Each worker appends to its own part file (no cross-process writes).
        process_output = f"{output_dir}/{adpter_version}{step}_part{i}.jsonl"

        p = multiprocessing.Process(
            target=run_vllm_process,
            args=(temp_data_path, process_output, model_name, adapter_name, gpu_ids, task_name, index_name)
        )
        processes.append(p)
        p.start()

    for p in processes:
        p.join()

    # Merge part files into the final output. A part file may not exist when
    # that worker's slice was empty (the `continue` above skipped it); the
    # old code crashed with FileNotFoundError in that case.
    with open(output_path, 'a+', encoding='utf8') as outfile:
        for i in range(num_parts):
            part_file = f"{output_dir}/{adpter_version}{step}_part{i}.jsonl"
            if not os.path.exists(part_file):
                continue
            with open(part_file, 'r', encoding='utf8') as infile:
                outfile.writelines(infile.readlines())

if __name__ == '__main__':
    # Run configuration. These names are read as module-level globals by
    # main() (and indirectly by the worker processes it spawns), so renaming
    # any of them would break main(). Note the 'adpter' spelling is used
    # consistently throughout the file.
    model_name = "Qwen3-32B"      # base model directory name under model/
    task_name = 'xxwlw3'          # dataset directory under data/instruct_data/
    adpter_version = 'xxwlw3'     # LoRA adapter version under adapter/qlora/<model>/
    index_name = 'paper_id'       # unique-id field used for resume/deduplication
    # step = None
    step = 1406                   # checkpoint step; falsy value uses the adapter root
    engine = "vllm"  # vllm/transformers/api
    main()