from major.models_manager import embedding_model, chat_model
from langchain_chroma import Chroma
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser


# Open (or create) the persisted Chroma collection that holds the resume
# embeddings, using the project-wide embedding model for query vectors.
# NOTE(review): assumes './profile_chroma_db' was already populated by a
# separate ingestion step — confirm before running queries against it.
vectorstore = Chroma(
    embedding_function=embedding_model.get_model(),
    persist_directory='./profile_chroma_db')

def generate_response(query: str, k: int = 3) -> str:
    """Answer *query* with retrieval-augmented generation over the resume store.

    Args:
        query: The user's question in natural language.
        k: Number of most-similar documents to retrieve as context.

    Returns:
        The chat model's reply as a plain string.
    """
    # Fetch the k resume chunks most similar to the query.
    retrieved_docs = vectorstore.similarity_search(query, k=k)

    prompt_template = ChatPromptTemplate.from_messages([
        ("system",
         "你是一个根据简历推荐应聘者的助手，如果信息不足，请告知。\n\n上下文：\n{context}"),
        ("human", "{question}")
    ])

    # Build the context block. .get() guards against retrieved documents whose
    # metadata lacks a 'source' key, which would otherwise raise KeyError.
    context = "\n\n".join(
        f"来源：{doc.metadata.get('source', '未知')}\n内容：{doc.page_content}"
        for doc in retrieved_docs
    )

    # Pipeline: prompt -> chat model -> plain-string output.
    chain = prompt_template | chat_model.get_model() | StrOutputParser()

    print("\n正在生成回复...")
    return chain.invoke({
        "context": context,
        "question": query
    })


if __name__ == "__main__":
    # Guarding the demo run keeps the module importable (e.g. for reuse of
    # generate_response) without triggering a model call as a side effect.
    query = "我需要做过NLP项目的人"
    print(f"\n用户提问：{query}")
    response = generate_response(query)
    print("\n=== 生成的回复 ===")
    print(response)