import os

import numpy as np
# NOTE(fix): the package installs as `FlagEmbedding` (pip install FlagEmbedding);
# `flag_embedding` is not an importable module name and raises ImportError.
from FlagEmbedding import BGEM3FlagModel
from rank_bm25 import BM25Okapi
from sentence_transformers import CrossEncoder
from sklearn.metrics.pairwise import cosine_similarity

# ==== 1. 示例文档 ====
# Toy corpus: five short Chinese sentences used by the retrieval demo below.
docs = [
    "Python是一种高级编程语言，由Guido van Rossum于1989年圣诞节期间开发。",
    "Python的设计哲学强调代码可读性，使用缩进代替大括号。",
    "Python常被用于Web开发、数据科学、人工智能等领域。",
    "JavaScript是一种常用于Web前端开发的编程语言。",
    "机器学习是人工智能的一个分支，研究计算机如何自动学习。"
]

# ==== 2. 稠密检索（Dense Retrieval） ====
def dense_retrieval(docs, query, top_k=3):
    """Dense retrieval: rank `docs` against `query` via BGE-M3 embeddings.

    Args:
        docs: list of document strings to search.
        query: query string.
        top_k: number of results to return (slice handles top_k > len(docs)).

    Returns:
        List of (doc, cosine_similarity) tuples, best first.
    """
    # Cache the embedding model on the function object so repeated calls
    # don't re-download / re-initialize it (the original reloaded per call).
    model = getattr(dense_retrieval, "_model", None)
    if model is None:
        model = BGEM3FlagModel('BAAI/bge-m3')
        dense_retrieval._model = model

    # Encode corpus and query; BGE-M3 returns a dict, dense vectors under
    # "dense_vecs".
    doc_embeddings = model.encode(docs, batch_size=32)["dense_vecs"]
    query_embedding = model.encode([query])["dense_vecs"][0]

    # One row (the query) against all document rows.
    similarities = cosine_similarity([query_embedding], doc_embeddings)[0]

    # argsort is ascending; reverse for best-first, then take top_k.
    top_indices = np.argsort(similarities)[::-1][:top_k]
    return [(docs[i], similarities[i]) for i in top_indices]

# ==== 3. 稀疏检索（Sparse Retrieval） ====
def _tokenize(text):
    """Tokenize for BM25: split on whitespace, then break runs of CJK
    ideographs into single characters.

    The original code split only on spaces; Chinese text has no spaces, so
    every document became a single token and BM25 scoring degenerated.
    Character-level CJK tokens are a standard lexical fallback.
    """
    tokens = []
    for chunk in text.split():
        buf = []
        for ch in chunk:
            if '\u4e00' <= ch <= '\u9fff':  # CJK Unified Ideographs block
                if buf:
                    tokens.append(''.join(buf))
                    buf = []
                tokens.append(ch)
            else:
                buf.append(ch)
        if buf:
            tokens.append(''.join(buf))
    return tokens

def sparse_retrieval(docs, query, top_k=3):
    """Sparse (BM25) retrieval: rank `docs` against `query`.

    Args:
        docs: list of document strings to search.
        query: query string.
        top_k: number of results to return.

    Returns:
        List of (doc, bm25_score) tuples, best first.
    """
    tokenized_docs = [_tokenize(doc) for doc in docs]
    bm25 = BM25Okapi(tokenized_docs)

    scores = bm25.get_scores(_tokenize(query))

    # Best-first ordering; slicing is safe when top_k exceeds len(docs).
    top_indices = np.argsort(scores)[::-1][:top_k]
    return [(docs[i], scores[i]) for i in top_indices]

# ==== 4. 结果融合 ====
def hybrid_fusion(dense_results, sparse_results, dense_weight=0.5, sparse_weight=0.5):
    """Fuse dense and sparse rankings by weighted sum of min-max normalized scores.

    Args:
        dense_results: list of (doc, score) from dense retrieval.
        sparse_results: list of (doc, score) from sparse retrieval.
        dense_weight: weight applied to normalized dense scores.
        sparse_weight: weight applied to normalized sparse scores.

    Returns:
        List of (doc, fused_score) tuples sorted best-first. Docs appearing
        in only one list contribute only that list's weighted score.
    """
    def _normalize(results):
        # Min-max normalize to [0, 1); empty input yields an empty array
        # (the original crashed on .min() of an empty array).
        scores = np.array([score for _, score in results], dtype=float)
        if scores.size == 0:
            return scores
        # 1e-8 guards against division by zero when all scores are equal.
        return (scores - scores.min()) / (scores.max() - scores.min() + 1e-8)

    dense_norm = _normalize(dense_results)
    sparse_norm = _normalize(sparse_results)

    # Accumulate weighted contributions per document.
    fused = {}
    for i, (doc, _) in enumerate(dense_results):
        fused[doc] = dense_weight * dense_norm[i]
    for i, (doc, _) in enumerate(sparse_results):
        fused[doc] = fused.get(doc, 0.0) + sparse_weight * sparse_norm[i]

    return sorted(fused.items(), key=lambda x: x[1], reverse=True)

# ==== 5. Reranker 二次排序 ====
def rerank(results, query, top_k=3):
    """Second-stage rerank of `results` with a cross-encoder.

    Args:
        results: list of (doc, score) pairs from a first-stage retriever;
                 the incoming scores are ignored.
        query: query string paired with each doc for scoring.
        top_k: number of results to return.

    Returns:
        List of (doc, cross_encoder_score) tuples, best first.
    """
    # Cache the cross-encoder on the function object so repeated calls
    # don't re-download / re-initialize it (the original reloaded per call).
    model = getattr(rerank, "_model", None)
    if model is None:
        model = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2')
        rerank._model = model

    # Cross-encoders score (query, doc) pairs jointly.
    pairs = [(query, doc) for doc, _ in results]
    scores = model.predict(pairs)

    # Best-first by cross-encoder score, then take top_k.
    return [(results[i][0], scores[i]) for i in np.argsort(scores)[::-1][:top_k]]

# ==== 主流程 ====
if __name__ == "__main__":
    query = "Python 有哪些应用领域？"

    def _show(header, ranked):
        # Shared pretty-printer: header line, then one "score - doc" per result.
        print(header)
        for doc, score in ranked:
            print(f"  {score:.4f} - {doc}")

    # Stage 1: dense (embedding) retrieval
    dense_results = dense_retrieval(docs, query, top_k=3)
    _show("📊 稠密检索结果：", dense_results)

    # Stage 2: sparse (BM25) retrieval
    sparse_results = sparse_retrieval(docs, query, top_k=3)
    _show("\n📊 稀疏检索结果：", sparse_results)

    # Stage 3: weighted score fusion of both candidate lists
    fused_results = hybrid_fusion(dense_results, sparse_results)
    _show("\n📊 融合结果：", fused_results)

    # Stage 4: cross-encoder rerank of the fused list
    final_results = rerank(fused_results, query, top_k=3)
    _show("\n📊 Rerank 最终结果：", final_results)