import json
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Path to the local model checkpoint (relative to this script's location).
local_model_path = "../.././Model/Qwen2.5-0.5B-Instruct"

# Load tokenizer and model from the local path. device_map="auto" lets
# transformers place the weights on the available device(s); .eval()
# switches off training-only behavior such as dropout.
tokenizer = AutoTokenizer.from_pretrained(local_model_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(local_model_path, device_map="auto", trust_remote_code=True).eval()

# 将问题发送给模型并生成回答
def generate_response(question):
    """Generate a model answer for *question*.

    The raw question string is used as the prompt (no chat template is
    applied). Up to 200 new tokens are sampled, and only the newly
    generated tokens are decoded so the prompt is never echoed back.

    Args:
        question: The user's question as a plain string.

    Returns:
        The generated answer text, stripped of surrounding whitespace.
        Note: do_sample=True makes the output stochastic between calls.
    """
    # Tokenize and move tensors to the model's device.
    inputs = tokenizer(question, return_tensors="pt").to(model.device)
    with torch.no_grad():
        # Single-beam *sampling* decode (do_sample=True). NOTE: this is not
        # greedy search, contrary to the original comment.
        outputs = model.generate(**inputs, max_new_tokens=200, do_sample=True, num_beams=1)
    # BUGFIX: decode only the tokens generated after the prompt. The previous
    # `.replace(input_text, "")` on the full decoded string would also delete
    # any repetition of the question text appearing inside the answer itself,
    # silently corrupting the response.
    prompt_len = inputs["input_ids"].shape[1]
    response = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True).strip()
    return response

# 生成并返回回答
def get_answer(question):
    """Return the model's answer to *question*.

    Thin convenience wrapper around generate_response().
    """
    return generate_response(question)

if __name__ == "__main__":
    # Smoke test: ask one sample question and print the generated answer.
    demo_question = "请问可以帮我科普一下有关焦虑的知识吗？"
    print(get_answer(demo_question))