import pickle
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams
from modelscope import snapshot_download
from datasets import load_dataset


def save_obj(obj, name):
    """Serialize *obj* to the file at *name* with pickle.

    :param obj: object to serialize
    :param name: destination file path (parent directory must exist)
    """
    with open(name, "wb") as sink:
        pickle.dump(obj, sink, protocol=pickle.HIGHEST_PROTOCOL)


def load_obj(name):
    """Deserialize and return the pickled object stored at *name*.

    :param name: path of a file previously written by :func:`save_obj`
    :return: the unpickled object
    """
    with open(name, "rb") as source:
        return pickle.load(source)


def glm4_vllm(prompts, output_dir, temperature=0, max_tokens=1024):
    """Run batched generation with GLM-4-9B-Chat through vLLM and pickle the raw outputs.

    :param prompts: chat-format input accepted by ``tokenizer.apply_chat_template``
        (presumably a conversation as a list of role/content dicts — verify against callers)
    :param output_dir: file path where the vLLM outputs are pickled via ``save_obj``
        (despite the name, this is a file path, not a directory)
    :param temperature: sampling temperature; 0 requests deterministic decoding
    :param max_tokens: maximum number of tokens to generate per prompt
    """
    # NOTE(review): comment below says the 1M-context variant, but the snapshot
    # actually downloaded is "glm-4-9b-chat" (the 128K model) — confirm which
    # model/context length is intended; max_model_len is set to 131072 (128K).
    # GLM-4-9B-Chat-1M
    max_model_len, tp_size = 131072, 1
    model_dir = snapshot_download("ZhipuAI/glm-4-9b-chat")

    tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
    llm = LLM(
        model=model_dir,
        tensor_parallel_size=tp_size,  # single GPU
        max_model_len=max_model_len,
        trust_remote_code=True,
        enforce_eager=True,  # skip CUDA graph capture (lower startup cost / memory)
    )
    # Hard-coded GLM-4 stop token ids — presumably the model's end-of-turn
    # special tokens; verify against the tokenizer's special-token map.
    stop_token_ids = [151329, 151336, 151338]
    sampling_params = SamplingParams(
        temperature=temperature, max_tokens=max_tokens, stop_token_ids=stop_token_ids
    )

    # Render the chat template to plain prompt text (tokenize=False); vLLM
    # tokenizes internally when generating.
    inputs = tokenizer.apply_chat_template(
        prompts, tokenize=False, add_generation_prompt=True
    )
    outputs = llm.generate(prompts=inputs, sampling_params=sampling_params)

    # Persist the raw vLLM output objects for offline inspection.
    save_obj(outputs, output_dir)
