import json
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Path to the locally stored Qwen2.5-0.5B-Instruct model directory.
local_model_path = "../.././Model/Qwen2.5-0.5B-Instruct"

# Load the tokenizer from the local path; trust_remote_code allows custom tokenizer code shipped with the model.
tokenizer = AutoTokenizer.from_pretrained(local_model_path, trust_remote_code=True)
# Load the model from the local path; device_map="auto" places weights on the available device(s); eval() disables dropout etc. for inference.
model = AutoModelForCausalLM.from_pretrained(local_model_path, device_map="auto", trust_remote_code=True).eval()

def load_database(file_path):
    """Read the JSON knowledge-base file at *file_path* and return the parsed object."""
    with open(file_path, mode='r', encoding='utf-8') as db_file:
        raw_text = db_file.read()
    return json.loads(raw_text)

def prepare_knowledge_text(database):
    """Flatten the knowledge base into a single prompt-ready text block.

    Args:
        database: dict whose values are dicts containing the keys
            'knowledge_type' and 'knowledge_content'.

    Returns:
        One string with a "知识类型：…\n知识内容：…\n" segment per entry,
        in the dict's iteration order. Empty string for an empty database.
    """
    # "".join over a generator instead of repeated += (which is quadratic);
    # the keys are unused, so iterate values() directly.
    return "".join(
        f"知识类型：{entry['knowledge_type']}\n知识内容：{entry['knowledge_content']}\n"
        for entry in database.values()
    )

# Generate an answer to a question, grounded in the given knowledge text.
def generate_response(question, knowledge_text):
    """Build a knowledge-grounded prompt, run the model, and return the answer text.

    Args:
        question: the user's question (str).
        knowledge_text: pre-formatted knowledge block (see prepare_knowledge_text).

    Returns:
        The model's generated answer with the prompt and special tokens removed.
    """
    # Construct the grounded prompt (runtime strings kept verbatim).
    input_text = f"以下是相关知识：\n{knowledge_text}\n问题：{question}\n请严格依据上述知识回答问题。回答条理清晰，语气温和！"
    # Tokenize and move tensors to the model's device.
    inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
    with torch.no_grad():
        # Greedy decoding: do_sample=False makes the answer deterministic,
        # matching the stated intent. The original passed do_sample=True,
        # which silently enabled sampling despite the "greedy" comment.
        outputs = model.generate(**inputs, max_new_tokens=200, do_sample=False, num_beams=1)
    # Strip the prompt by slicing off the input token ids instead of
    # str.replace(): decoding the re-encoded prompt is not guaranteed to
    # reproduce input_text byte-for-byte, so replace() could leave the
    # prompt embedded in the returned answer.
    prompt_len = inputs["input_ids"].shape[-1]
    response = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True).strip()
    return response

# Path to the JSON knowledge-base file.
db_source = './DBFiles/knowledge_db.json'
# Load the knowledge base at import time (raises if the file is missing/invalid).
database = load_database(db_source)
# Pre-render the knowledge base once into prompt text, shared by all queries.
knowledge_text = prepare_knowledge_text(database)

def get_answer(question):
    """Answer *question* using the module-level pre-loaded knowledge base."""
    return generate_response(question, knowledge_text)

if __name__ == "__main__":
    # 简单的测试逻辑
    question = "请问可以帮我科普一有关焦虑的知识吗？"
    answer = get_answer(question)
    print(answer)