from transformers import AutoModelForCausalLM, AutoTokenizer
import torch


def inference(model, tokenizer, prompt, max_new_tokens=50):
    """
    Run text generation with a causal language model.

    :param model: generation model; must expose a ``generate`` method
    :param tokenizer: tokenizer used to encode the prompt and decode the output
    :param prompt: input prompt text
    :param max_new_tokens: maximum number of new tokens to generate
        (default 50, matching the previously hard-coded value)
    :return: the generated text, decoded with special tokens stripped
    """
    # Encode the prompt into model-ready tensors.
    inputs = tokenizer(prompt, return_tensors="pt")

    # Inference only: disable gradient tracking to save memory and compute.
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=max_new_tokens)

    # Decode the first (and only) generated sequence back into text.
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    print(generated_text)

    return generated_text


def load_inference_model(model_name):
    """
    Load a pretrained tokenizer/model pair for causal-LM inference.

    :param model_name: model name or path passed to ``from_pretrained``
    :return: tuple of (tokenizer, model)
    """
    return (
        AutoTokenizer.from_pretrained(model_name),
        AutoModelForCausalLM.from_pretrained(model_name),
    )