import random
from vllm import LLM, SamplingParams
from static_var import llm_cls_prompt_pkl, llm_infer_result_pkl

import sys

sys.path.append("..")
from utils import load_obj, save_obj


# Decoding configuration: moderately creative sampling, capped at 128 new
# tokens, stopping at the Qwen chat end-of-turn marker so generations do not
# run past the assistant turn.
sampling_params = SamplingParams(
    temperature=0.7,
    top_p=0.95,
    max_tokens=128,
    stop="<|im_end|>",
)

# Local ModelScope cache path for Qwen1.5-14B-Chat.
model_path = "/home/jie/.cache/modelscope/hub/qwen/Qwen1___5-14B-Chat"


# Build the vLLM engine. enforce_eager=True disables CUDA-graph capture
# (lower GPU memory at some decode-speed cost); max_model_len bounds the
# context window the engine will allocate KV cache for.
engine_kwargs = dict(
    model=model_path,
    trust_remote_code=True,
    tokenizer=model_path,
    tokenizer_mode="auto",
    max_model_len=16016,
    enforce_eager=True,
)
llm = LLM(**engine_kwargs)

# Qwen chat (ChatML) prompt template; each raw classification prompt is
# inserted as the user turn. Adjacent string literals concatenate to the
# exact same template string as before.
template = (
    "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
    "<|im_start|>user\n{info}<|im_end|>\n<|im_start|>assistant\n"
)

# Load the raw prompts and wrap every one in the chat template.
raw_data = load_obj(llm_cls_prompt_pkl)
data = [template.format(info=item) for item in raw_data]

# Sample at most 200k prompts. random.sample raises ValueError when the
# requested size exceeds the population, so clamp to len(data) to avoid a
# crash on smaller prompt sets.
sample_size = min(len(data), 200_000)
outputs = llm.generate(random.sample(data, sample_size), sampling_params)

save_obj(outputs, llm_infer_result_pkl)

# Usage: nohup python vllm_infer.py > infer_cls1.log 2>&1 &