from major.models_manager import embedding_model, chat_model
from langchain_chroma import Chroma
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser


# Module-level vector store handle backed by a local Chroma database.
# NOTE(review): opens ./chroma_db at import time — assumes the directory was
# already populated by a separate ingestion step; confirm with the pipeline.
vectorstore = Chroma(
    embedding_function=embedding_model.get_model(),
    persist_directory='./chroma_db')


def generate_response(query: str, k: int = 10) -> str:
    """Answer *query* with retrieval-augmented generation.

    Retrieves the top-*k* most similar documents from the module-level
    Chroma vector store, formats them into the system prompt as context,
    and invokes the chat model chain.

    Args:
        query: The user's question.
        k: Number of documents to retrieve (default 10).

    Returns:
        The model's answer as a plain string (via StrOutputParser).
    """
    retrieved_docs = vectorstore.similarity_search(query, k=k)

    prompt_template = ChatPromptTemplate.from_messages([
        ("system",
         "你是一个根据上下文内容回答用户问题的助手，若上下文中无相关问题的答案，则回答不知道。\n\n上下文：\n{context}"),
        ("human", "{question}")
    ])

    # A retrieved document may lack a 'source' metadata entry; fall back to
    # a placeholder instead of raising KeyError mid-generation.
    context = "\n\n".join(
        f"来源：{doc.metadata.get('source', '未知')}\n内容：{doc.page_content}"
        for doc in retrieved_docs)

    chain = prompt_template | chat_model.get_model() | StrOutputParser()

    print("\n正在生成回复...")
    response = chain.invoke({
        "context": context,
        "question": query
    })

    return response



# Demo entry point: guarded so that importing this module (e.g. to reuse
# generate_response elsewhere) does not fire a query as a side effect.
if __name__ == "__main__":
    query = "自注意力是什么"
    print(f"\n用户提问：{query}")
    response = generate_response(query)
    print("\n=== 生成的回复 ===")
    print(response)