# === Step 1: 安装依赖 ===
# pip install -U FlagEmbedding scikit-learn

from FlagEmbedding import BGEM3FlagModel
from sklearn.metrics.pairwise import cosine_similarity

# === Step 2: Load the model ===
# Loads the BGE-M3 model from a local checkpoint directory.
model = BGEM3FlagModel("../model/bge-m3", use_fp16=False)  # use_fp16=True optionally speeds up inference

# === Step 3: Prepare the data ===
# Candidate passages to be ranked against the query (runtime data — intentionally left in Chinese).
passages = [
    "文档1：使用混合精度训练和分布式训练可以显著加速深度学习模型的训练。",
    "文档2：优化器选择（如AdamW）和学习率调度策略对训练速度至关重要。",
    "文档3：数据预处理和批大小调整是提升训练效率的关键步骤。",
]

# The search query whose relevance to each passage is scored below.
query = "如何优化深度学习模型的训练速度？"

# === Step 4: Encode texts (producing dense, sparse, and multi-vector representations) ===
# All three representation types are requested in one pass per batch;
# the same options apply to passages and to the query.
encode_options = {
    "return_dense": True,
    "return_sparse": True,
    "return_colbert_vecs": True,
}

passage_embeddings = model.encode(passages, **encode_options)
query_embeddings = model.encode([query], **encode_options)

# === Step 5: Dense retrieval scores (cosine similarity) ===
# One query row against every passage row; keep the single result row as a list.
dense_scores = cosine_similarity(
    query_embeddings["dense_vecs"],
    passage_embeddings["dense_vecs"],
)[0].tolist()

# === Step 6: Sparse retrieval scores (lexical / keyword matching) ===
def sparse_similarity(query_vec, passage_vec):
    """Lexical matching score between query and passage sparse term weights."""
    return model.compute_lexical_matching_score(query_vec, passage_vec)


# Hoist the (single) query's lexical weights out of the comprehension.
query_lexical = query_embeddings["lexical_weights"][0]
sparse_scores = [
    sparse_similarity(query_lexical, passage_lexical)
    for passage_lexical in passage_embeddings["lexical_weights"]
]


# === Step 7: 计算多向量检索得分（MaxSim） ===
def multi_similarity(query_vec, passage_vec):
    colbert_score = model.colbert_score(
        query_vec, passage_vec)
    return colbert_score.item()

maxsim_scores = [
    multi_similarity(query_embeddings["colbert_vecs"][0], p)
    for p in passage_embeddings["colbert_vecs"]
]

# === Step 8: Fuse the three score lists (weighted average) ===
# Suggested weighting: dense 0.5, sparse 0.3, MaxSim 0.2.
# NOTE(review): the three score types are on different scales
# (cosine is bounded, lexical matching is not) — confirm the weights
# are calibrated for this before relying on the fused ranking.
W_DENSE, W_SPARSE, W_MAXSIM = 0.5, 0.3, 0.2
final_scores = [
    W_DENSE * d + W_SPARSE * s + W_MAXSIM * m
    for d, s, m in zip(dense_scores, sparse_scores, maxsim_scores)
]

# === Step 9: Rank passages by fused score and print them ===
ranked = sorted(zip(passages, final_scores), key=lambda pair: pair[1], reverse=True)

print("混合检索结果（按相关性排序）:")
for doc, score in ranked:
    print(f"\n得分 {score:.4f}:\n{doc}")