import pickle
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams
from modelscope import snapshot_download
from datasets import load_from_disk


# Batch-generate responses with GLM-4-9B-Chat via vLLM for every prompt in a
# locally saved HuggingFace dataset, then pickle the raw generation outputs
# for later post-processing.

dataset = load_from_disk("hydrogen_binary_dataset_11w")
model_id = "ZhipuAI/glm-4-9b-chat"
# Resolve the ModelScope model id to a local snapshot directory; all
# downstream loaders take the local path.
model_path = snapshot_download(model_id)

# GLM-4-9B-Chat
# If you run into OOM, reduce max_model_len or increase tp_size.
max_model_len, tp_size = 131072, 1

# Build one single-turn chat conversation per example in the train split.
train_split = dataset["train"]
prompt = [
    [{"role": "user", "content": example["prompt"]}]
    for example in train_split
]

tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
llm = LLM(
    model=model_path,
    tensor_parallel_size=tp_size,
    max_model_len=max_model_len,
    trust_remote_code=True,
    # enforce_eager disables CUDA graph capture, trading some speed for
    # lower memory usage.
    enforce_eager=True,
    # For GLM-4-9B-Chat-1M, enable the following if you run into OOM:
    # enable_chunked_prefill=True,
    # max_num_batched_tokens=8192
)
# GLM-4 end-of-turn / EOS token ids — generation stops on any of them.
stop_token_ids = [151329, 151336, 151338]
sampling_params = SamplingParams(
    temperature=0.95, max_tokens=1024, stop_token_ids=stop_token_ids
)

# Render each conversation into a plain-text prompt using the model's chat
# template; add_generation_prompt appends the assistant turn marker so the
# model answers rather than continuing the user message.
inputs = tokenizer.apply_chat_template(
    prompt, tokenize=False, add_generation_prompt=True
)

outputs = llm.generate(prompts=inputs, sampling_params=sampling_params)

# Persist the full vLLM RequestOutput objects to disk.
with open('output_11w.pkl', 'wb') as f:
    pickle.dump(outputs, f)