from sentence_transformers import SentenceTransformer, CrossEncoder
import chromadb
from llm.doubao import doubao_qa
from llm.local import ollama_qa

# Sentence-embedding model used to vectorize user queries for retrieval.
model = SentenceTransformer("all-MiniLM-L6-v2")
# Pre-trained Cross-Encoder model used to score query/document pair relevance.
rerank_model = CrossEncoder("cross-encoder/ms-marco-MiniLM-L-6-v2")

# Persistent ChromaDB client backed by local disk storage; the "rag"
# collection is assumed to already hold the embedded document chunks.
client = chromadb.PersistentClient(path="./chromadb_data")
collection = client.get_or_create_collection("rag")


def get_query_embedding(query):
    """Encode *query* with the sentence-transformer and return it as a plain list."""
    embedding = model.encode(query)
    return embedding.tolist()


def retrieve_related_chunks(query_embedding, n_results=3):
    """Fetch the documents nearest to *query_embedding* from the vector store.

    Args:
        query_embedding: Query vector (list of floats) to search with.
        n_results: Maximum number of documents to return.

    Returns:
        List of retrieved document texts for the single query.

    Raises:
        RuntimeError: If the collection returns no documents.
    """
    results = collection.query(query_embeddings=[query_embedding], n_results=n_results)
    related_chunks = results.get("documents")
    # The original code called exit(1) here, silently terminating the whole
    # process with no diagnostic; raise a descriptive error instead so the
    # failure is visible and callers can handle it.
    if not related_chunks or not related_chunks[0]:
        raise RuntimeError("No related documents found in the 'rag' collection.")
    return related_chunks[0]


def process_text(text, max_length=512):
    """Collapse runs of whitespace and truncate *text* to *max_length* chars.

    Texts longer than the limit are cut and suffixed with "...".
    """
    normalized = " ".join(text.split())
    if len(normalized) <= max_length:
        return normalized
    return normalized[:max_length] + "..."


def build_query_document_pairs(query, documents):
    """Pair *query* with every document, as expected by CrossEncoder.predict."""
    return [[query, doc] for doc in documents]


def cross_encoder_rerank(query, documents, top_k=5):
    """Re-rank *documents* against *query* using the Cross-Encoder model.

    Scores each (query, document) pair and returns the *top_k* original
    (untruncated) documents ordered by descending relevance.
    """
    # Normalize whitespace and length before scoring.
    cleaned_query = process_text(query)
    cleaned_docs = [process_text(doc) for doc in documents]
    pairs = build_query_document_pairs(cleaned_query, cleaned_docs)
    # One relevance score per (query, document) pair.
    scores = rerank_model.predict(pairs)
    # Sort the original documents by score, highest first.
    ranked = sorted(
        zip(documents, (float(score) for score in scores)),
        key=lambda pair: pair[1],
        reverse=True,
    )
    return [doc for doc, _ in ranked[:top_k]]


def compare_retrieval_methods(query, query_embedding):
    """Retrieve a wide candidate set by vector search, then rerank it.

    Pulls the 10 nearest chunks from the vector store, then lets the
    Cross-Encoder select the 5 most relevant ones.
    """
    candidates = retrieve_related_chunks(query_embedding, n_results=10)
    return cross_encoder_rerank(query, candidates, top_k=5)


def generate_answer_with_reranked_context(query, documents):
    """Build a prompt from the reranked documents and query the local LLM."""
    # Join the reranked chunks into a single context section for the prompt.
    context = "\n".join(documents)
    prompt = f"""
        基于以下通过文档重排序筛选出来的相关信息，回答用户的问题
        重排序后的相关信息:{context}
        用户的问题为:{query}

        注意：系统使用了Cross-Encoder重排序技术，能够更准确地评估查询与文档之间的相关性，有效避免相似度陷阱问题。

        请提供一个准确、全面的回答，确保：
        1. 直接回答用户的查询
        2. 充分利用重排序筛选出的相关信息
        3. 如果信息不足，请明确指出
        4. 保持回答的准确性和相关性
    """
    return ollama_qa(prompt)


if __name__ == "__main__":
    user_query = "婚假怎么请？"
    embedding = get_query_embedding(user_query)
    # 1. Vector search for candidates, then Cross-Encoder reranking.
    reranked_docs = compare_retrieval_methods(user_query, embedding)
    # 2. Generate the final answer from the reranked context.
    answer = generate_answer_with_reranked_context(user_query, reranked_docs)
    print(f"final_answer:{answer}")
