from sentence_transformers import SentenceTransformer
import chromadb
from llm.doubao import doubao_qa
from llm.local import ollama_qa
import jieba

# 导入numpy库，用于数值计算
import numpy as np

# 导入rank_bm25库，用于BM25稀疏检索
from rank_bm25 import BM25Okapi

# Embedding model used for dense (vector) retrieval.
model = SentenceTransformer("all-MiniLM-L6-v2")

# Persistent Chroma client; the "rag" collection is presumably populated by a
# separate ingestion step — TODO confirm against the indexing script.
client = chromadb.PersistentClient(path="./chromadb_data")
collection = client.get_or_create_collection("rag")


def get_query_embedding(query):
    """Encode *query* with the sentence-transformer and return a plain list of floats."""
    embedding = model.encode(query)
    return embedding.tolist()


def retrieve_related_chunks(query_embedding, n_results=3):
    """Return the ``n_results`` documents closest to *query_embedding*.

    Args:
        query_embedding: A single embedding vector (list of floats).
        n_results: Maximum number of documents to return.

    Returns:
        The list of document strings for the single query.

    Raises:
        ValueError: If the collection returns no matching documents.
    """
    results = collection.query(query_embeddings=[query_embedding], n_results=n_results)
    related_chunks = results.get("documents")
    # BUG FIX: the original called exit(1) here, silently terminating the whole
    # process with no diagnostic and making the helper unusable as a library
    # function. Raise a descriptive exception instead.
    if not related_chunks or not related_chunks[0]:
        raise ValueError("No related documents found for the given query embedding")
    return related_chunks[0]


# 获取知识库中所有的文档
# Fetch every document stored in the knowledge-base collection.
def get_all_documents():
    """Return all document strings currently stored in the Chroma collection."""
    return collection.get().get("documents", [])


# 构建BM25索引
# Build a BM25 index over the corpus.
def build_bm25_index(documents):
    """Tokenize *documents* with jieba and build a BM25 index over them.

    Args:
        documents: Iterable of document strings.

    Returns:
        Tuple of (BM25Okapi index, list of per-document token lists).
    """
    # jieba performs Chinese word segmentation; BM25 operates on token lists.
    tokenized_docs = [list(jieba.cut(doc)) for doc in documents]
    bm25_index = BM25Okapi(tokenized_docs)
    return bm25_index, tokenized_docs


# Backward-compatible alias for the original misspelled public name.
buid_bm25_index = build_bm25_index


def bm25_retrieval(query, bm25, documents, n_results):
    """Score *documents* against *query* with BM25 and return the top hits.

    Returns:
        A list of dicts with keys "document", "score" and "rank"
        (rank is 1-based, best match first).
    """
    # Segment the query the same way the index was tokenized.
    query_tokens = list(jieba.cut(query))
    scores = bm25.get_scores(query_tokens)
    # Indices of the highest-scoring documents, best first.
    top_indices = np.argsort(scores)[::-1][:n_results]
    return [
        {"document": documents[idx], "score": scores[idx], "rank": pos + 1}
        for pos, idx in enumerate(top_indices)
    ]


def dense_retrieval(query, n_results):
    """Run a dense (vector-similarity) search for *query*.

    Scores are normalized into (0, 1] via 1 / (1 + distance), so a distance
    of 0 (most similar) maps to 1.0 and larger distances approach 0.

    Returns:
        A list of dicts with keys "document", "score" and "rank".
    """
    # Vectorize the query, then search the collection.
    embedding = get_query_embedding(query)
    results = collection.query(query_embeddings=[embedding], n_results=n_results)
    # Chroma returns one list per query; a single query was issued.
    docs = results.get("documents", [])[0]
    distances = results.get("distances", [])[0]
    dense_results = []
    for pos, (doc, distance) in enumerate(zip(docs, distances), start=1):
        dense_results.append(
            {"document": doc, "score": 1.0 / (1.0 + distance), "rank": pos}
        )
    return dense_results


def calculate_weighted_rank(rank, k=60):
    """Reciprocal-rank-fusion weight for a 1-based *rank*: 1 / (k + rank).

    k=60 is the conventional RRF damping constant; it keeps the very top
    ranks from completely dominating the fused score.
    """
    return 1.0 / (k + rank)


# Fuse sparse (BM25) and dense results via weighted reciprocal rank fusion.
def fuse_retrieval_results(bm25_results, dense_results, dense_weight, sparse_weight):
    """Merge BM25 and dense retrieval results into one ranked document list.

    Each result contributes ``calculate_weighted_rank(rank) * weight`` to its
    document's fused score; a document found by both retrievers accumulates
    both contributions.

    Args:
        bm25_results: Sparse results — dicts with "document" and "rank".
        dense_results: Dense results — dicts with "document" and "rank".
        dense_weight: Multiplier applied to dense-retrieval contributions.
        sparse_weight: Multiplier applied to BM25 contributions.

    Returns:
        Documents sorted by fused score, best first.
    """
    document_scores = {}
    for weight, results in ((sparse_weight, bm25_results), (dense_weight, dense_results)):
        for result in results:
            doc = result["document"]
            score = calculate_weighted_rank(result["rank"]) * weight
            # BUG FIX: the original did document_scores.get("doc", 0) — the
            # literal string "doc" instead of the document — so scores never
            # accumulated and a document hit by both retrievers kept only the
            # last contribution.
            document_scores[doc] = document_scores.get(doc, 0) + score
    ranked = sorted(document_scores.items(), key=lambda item: item[1], reverse=True)
    return [doc for doc, _ in ranked]


def generate_answer(query, retrieved_documents):
    """Ask the local LLM to answer *query* grounded in *retrieved_documents*."""
    # Join the retrieved chunks into a single context section for the prompt.
    context = "\n".join(retrieved_documents)
    prompt = f"""
        基于以下通过混合检索技术获取的相关信息，请回答用户的查询。

        检索到的相关信息：
        {context}

        用户查询：{query}

        注意：系统使用了混合检索技术，结合了稀疏检索（关键词匹配）和密集检索（语义理解）的优势。

        请提供一个准确、全面的回答，确保：
        1. 直接回答用户的查询
        2. 充分利用检索到的相关信息
        3. 如果信息不足，请明确指出
        4. 保持回答的准确性和相关性

        回答：
    """
    return ollama_qa(prompt)


def hybrid_retrieval(query, n_results=5, dense_weight=0.7, sparse_weight=0.3):
    """Run sparse (BM25) and dense retrieval for *query* and fuse the results.

    Args:
        query: User query string.
        n_results: How many documents each retriever returns, and the maximum
            number of fused documents returned.
        dense_weight: Fusion weight for dense-retrieval ranks.
        sparse_weight: Fusion weight for BM25 ranks.

    Returns:
        Up to ``n_results`` documents ordered by fused relevance.
    """
    # NOTE(review): the BM25 index is rebuilt from the full corpus on every
    # call; fine for small collections, worth caching for larger ones.
    corpus = get_all_documents()
    bm25_index, _ = buid_bm25_index(corpus)
    # Two-way parallel retrieval: sparse (keyword) and dense (semantic).
    sparse_hits = bm25_retrieval(query, bm25_index, corpus, n_results)
    dense_hits = dense_retrieval(query, n_results)
    # Fuse both rankings and truncate to the requested size.
    fused = fuse_retrieval_results(sparse_hits, dense_hits, dense_weight, sparse_weight)
    return fused[:n_results]


if __name__ == "__main__":
    # Demo: retrieve supporting documents with hybrid (sparse + dense)
    # retrieval, then ask the LLM to answer from them.
    query = "年假如何申请?"
    retrieved_documents = hybrid_retrieval(
        query, n_results=5, dense_weight=0.7, sparse_weight=0.3
    )
    final_answer = generate_answer(query, retrieved_documents)
    print(f"final_answer:{final_answer}")
