import json
import torch
import os
from vllm import LLM, SamplingParams
import vllm
from tqdm import tqdm
# Log which vllm build is actually loaded — useful when multiple
# environments/installs exist and results must be reproducible.
print("vllm version:", vllm.__version__)
print("vllm location:", vllm.__file__)
import re

import time 
if __name__ == '__main__':
    # Batch-generate tool-use QA answers with a local Qwen model and
    # checkpoint progress to a JSONL file so interrupted runs can resume.
    sampling_params = SamplingParams(temperature=0.1, max_tokens=360)

    available_gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',')
    print(available_gpus)
    llm = LLM(
            model="/mnt/diskhd/Backup/DownloadModel/Qwen2.5-72B-Instruct-GPTQ-Int4/",
            tensor_parallel_size=len(available_gpus),
            dtype=torch.bfloat16,
            gpu_memory_utilization=0.95,
            max_model_len=6000,
            enable_prefix_caching=True,
            )

    # Resume support: reload previously generated answers (if any) so only
    # the remaining items are processed. On the first run the checkpoint
    # file does not exist yet — start from an empty list instead of crashing.
    save_path = "train_part0.jsonl"
    answers = []
    if os.path.exists(save_path):
        with open(save_path, "r", encoding='utf-8') as f:
            for line in f:
                answers.append(json.loads(line))
    length = len(answers)

    # The bracketed "[...]" span holds the numbered question list; compile
    # the pattern once since it is applied to every record.
    bracket_re = re.compile(r'\[(.*?)\]', re.DOTALL)

    # Collect candidate records: keep QA strings that are non-empty, contain
    # the request marker "请", and carry a bracketed question list.
    data = []
    for i in range(0, 1, 1):
        with open(f"question_{i}.jsonl", "r", encoding='utf-8') as f:
            for line in f:
                tmp = json.loads(line)
                for qa in tmp.get("qa", []):
                    if len(qa) == 0 or "请" not in qa:
                        continue
                    if not bracket_re.findall(qa):
                        continue
                    # Shallow copy: "apis" and other fields stay shared;
                    # only "user_messages" differs per QA string.
                    temp = tmp.copy()
                    temp["user_messages"] = qa
                    data.append(temp)

    print(len(data), length)

    def save_all():
        # Rewrite the whole checkpoint: one JSON object per line.
        with open(save_path, "w", encoding='utf-8') as f:
            for rec in answers:
                f.write(json.dumps(rec, ensure_ascii=False) + "\n")

    # Skip the first `length` items — they were answered in a previous run
    # (assumes data is rebuilt in the same order across runs).
    for item in tqdm(data[length:]):
        tools = item["apis"]

        # Questions are stored inside "[...]" as a "\n1. ...\n2. ..." list;
        # split on the numbering and drop fragments too short to be real.
        match = bracket_re.findall(item["user_messages"])
        user_messages = re.split(r"\n\d+\. ", "\n" + match[0])
        user_messages = [q for q in user_messages if len(q) > 8]

        item["targets"] = []

        # Multi-turn chat: each question is appended to the running message
        # history so the model sees all earlier turns (prefix caching makes
        # the repeated context cheap).
        message = []
        for question in user_messages:
            question = '"' + question + '"\n\n上面这个问题是否需要system中的某个工具？上下文会话中是否给全了工具调用所需的所有参数？给出理由。给出理由后，如果需要请给出工具调用，否则只回答不需要使用工具。'
            message.append({"role": "user",  "content": question})
            outputs = llm.chat(message, sampling_params=sampling_params, tools=tools)
            answer = outputs[0].outputs[0].text
            message.append({"role": "assistant",  "content": answer})

        item["message"] = message
        answers.append(item)

        # Periodic checkpoint: fires once every 100 processed items
        # (when the count is 10 mod 100).
        if len(answers) % 100 == 10:
            save_all()

    # Final flush of everything generated.
    save_all()