from sentence_transformers import SentenceTransformer
import chromadb
from llm.doubao import doubao_qa
from llm.local import ollama_qa

# Embedding model used to vectorize queries before similarity search.
model = SentenceTransformer("all-MiniLM-L6-v2")

# Persistent on-disk Chroma store; the "rag" collection holds the
# pre-ingested document chunks this script retrieves from.
client = chromadb.PersistentClient(path="./chromadb_data")
collection = client.get_or_create_collection("rag")


def get_query_embedding(query):
    """Encode *query* with the sentence-transformer model and return the
    embedding as a plain Python list (what Chroma's query API expects)."""
    vector = model.encode(query)
    return vector.tolist()


def retrieve_related_chunks(query_embedding, n_results=3):
    """Return the document chunks most similar to *query_embedding*.

    Parameters
    ----------
    query_embedding : list[float]
        Embedding of the (rewritten) user query.
    n_results : int
        Maximum number of chunks to retrieve (default 3).

    Returns
    -------
    list[str]
        The retrieved documents for the single query embedding.

    Raises
    ------
    SystemExit
        When the collection returns no documents. The process exits with a
        nonzero status, as before, but now with a diagnostic message instead
        of a silent bare ``exit(1)``.
    """
    results = collection.query(query_embeddings=[query_embedding], n_results=n_results)
    # Chroma returns a list of document lists, one inner list per query embedding.
    related_chunks = results.get("documents")
    if not related_chunks or not related_chunks[0]:
        # Fail loudly: message goes to stderr and the exit status is nonzero,
        # rather than terminating with no explanation at all.
        raise SystemExit(
            "retrieve_related_chunks: no related documents found in the 'rag' collection"
        )
    return related_chunks[0]


def rewrite_query_with_leave_type(query):
    """Ask the local LLM to map the employee's question to a standard leave-type
    keyword (e.g. annual/sick/marriage leave). Falls back to the original
    *query* when the model returns nothing."""
    prompt = f"""
        请从下列的员工提问中识别出最相关的假期类型(如年假,病假,婚假,产假,陪产假,丧假)
        只输出假期类型标准的词汇，不要输出其它内容
        员工的提问:{query}
        假期类型:
    """
    answer = ollama_qa(prompt)
    if not answer:
        return query
    return answer.strip()


if __name__ == "__main__":
    query = "我下个月结婚，可以请几天假?"
    # 识别用户的类型，进行提问 的重写
    leave_type = rewrite_query_with_leave_type(query)
    # 用假期类型的标准词汇进行向量化
    query_embedding = get_query_embedding(leave_type)
    print(f"query_embedding:{len(query_embedding)}")
    related_chunks = retrieve_related_chunks(query_embedding, 3)
    print(f"related_chunks:{related_chunks}")
    query_context = "\n".join(related_chunks)
    print(f"query_context:{query_context}")
    prompt = f"已知信息:{query_context}\n\n请根据上述的内容回答用户下面的问题:{query}"
    print(f"prompt:{prompt}")
    answer = ollama_qa(prompt)
    print(f"answer:{answer}")
