import json
import torch
import os
from vllm import LLM, SamplingParams
import vllm
from tqdm import tqdm
# Log the installed vLLM version and its install path for debugging/reproducibility
# (useful when multiple vllm builds exist on the machine).
print("vllm version:", vllm.__version__)
print("vllm location:", vllm.__file__)

# Few-shot seed prompt (Chinese): a multi-turn dialogue between "用户" (user) and
# "助手" (assistant) that ends with the assistant emitting a <tool_call>, followed
# by an instruction to generate similar dialogues for other scenarios as JSON.
# NOTE(review): this module-level `prompt` is never read below — the name is
# rebound inside the main loop by `tokenizer.apply_chat_template(...)`; confirm
# whether this constant is still needed.
prompt = """{
"用户":"你好",
"助手":"您好",
"用户":"找出我这周的工作日报.",
"助手":"好的，如下是您这周的工作日报, ...",
"用户":"合并整理为周报，格式要求……",
"助手":"好的，下面是整理后的周报，……",
"用户":"将第二条挪到最开头，并……",
"助手":"好的，下面是修改后的周报，……",
"用户":"将周报发给xxx",
"助手":"好的，……，请问确认发送吗",
"用户":"没问题，发送"
"助手":"好的，……，已发送，<tool_call>...</tool_call>",
}

仿照上面的对话生成一些其他场景中用户和助手的对话，对话中助手的回答需要有一次 使用system中的API进行<tool_call>。使用json格式返回。
"""
import time 
from transformers import AutoTokenizer, AutoModelForCausalLM
def _dump_jsonl(path, records):
    """Write `records` to `path`, one JSON object per line (UTF-8, non-ASCII kept)."""
    with open(path, "w", encoding="utf-8") as fout:
        for rec in records:
            fout.write(json.dumps(rec, ensure_ascii=False) + "\n")


def _build_questions(user_messages):
    """Render the user turns as a bracketed numbered list, e.g. "[1. q1\n2. q2]".

    Returns "[]" for an empty list (the previous in-place string slicing
    produced a malformed "]" in that case).
    """
    numbered = "\n".join(f"{i}. {q}" for i, q in enumerate(user_messages, start=1))
    return "[" + numbered + "]"


if __name__ == '__main__':
    tokenizer = AutoTokenizer.from_pretrained(
        "/mnt/diskhd/Backup/DownloadModel/Qwen2.5-72B-Instruct-GPTQ-Int4/")

    sampling_params = SamplingParams(temperature=0.7, max_tokens=3200)

    # One tensor-parallel rank per visible GPU; assumes CUDA_VISIBLE_DEVICES is set.
    available_gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',')
    print(available_gpus)
    llm = LLM(
        model="/mnt/diskhd/Backup/DownloadModel/Qwen2.5-72B-Instruct-GPTQ-Int4/",
        tensor_parallel_size=len(available_gpus),
        dtype=torch.bfloat16,
        gpu_memory_utilization=0.95,
        max_model_len=8192,
        enable_prefix_caching=True,
    )

    # Load every input record up front so the input file is closed before the
    # (long-running) generation loop starts.
    with open("test.jsonl", "r", encoding="utf-8") as fin:
        data = [json.loads(line) for line in fin]
    print(len(data))

    answers = []
    for item in tqdm(data):
        questions = _build_questions(item["user_messages"])

        prompts = []
        # Sliding windows of up to two APIs each; the final window holds one.
        for start in range(len(item["apis"])):
            tools = item["apis"][start:start + 2]
            message = [
                {"role": "user", "content": questions + "\n\n\n以用户的身份仿照上面的问题，使用system中的API，设计其他场景的一串问题。返回一个list。"},
            ]
            # Seed the answer with "[1. " so the model continues the numbered list.
            prompt = tokenizer.apply_chat_template(
                message,
                tools=tools,
                tokenize=False,
                add_generation_prompt=True,
            ) + "[1. "
            prompts.append(prompt)

        outputs = llm.generate(prompts, sampling_params=sampling_params)
        # Re-prepend the seeded prefix: generate() returns only the continuation.
        item["qa"] = ["[1. " + out.outputs[0].text for out in outputs]
        answers.append(item)

        # Periodic checkpoint so a crash loses at most ~50 items. Fires when
        # len(answers) is 10, 60, 110, ... (schedule kept from the original).
        if len(answers) % 50 == 10:
            _dump_jsonl("question_3.jsonl", answers)

    # Final write of all accumulated results.
    _dump_jsonl("question_3.jsonl", answers)
    
    


 
