from rank_bm25 import BM25Okapi
# from FlagEmbedding import BGEM3FlagModel
# import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"

def hybrid_search(model, query: str, docs: list, top_k: int=50, bm25_weight: float=0.04, dense_weight: float=1.0):
    """Hybrid retrieval: rank docs by a weighted mix of dense and BM25 scores.

    Args:
        model: encoder whose ``encode(...)`` returns a dict containing a
            ``'dense_vecs'`` numpy array (e.g. a BGE-M3 flag model —
            assumed from the commented-out usage below; TODO confirm).
        query: search query string.
        docs: list of candidate document strings.
        top_k: maximum number of documents to return.
        bm25_weight: weight of the BM25 lexical score in the fused ranking.
        dense_weight: weight of the dense-embedding similarity score.

    Returns:
        Up to ``top_k`` entries of ``docs``, ordered by descending hybrid score.
    """
    # Guard: nothing to rank (also avoids encoding/BM25 on an empty corpus).
    if not docs:
        return []

    # Dense similarity: one query vector dotted against all document vectors.
    # max_length can be lowered to speed up encoding when inputs are short.
    query_emb = model.encode(query,
                             batch_size=12,
                             max_length=8192,
                             )['dense_vecs']
    doc_embs = model.encode(docs, batch_size=12, max_length=8192)['dense_vecs']
    dense_similarity = query_emb @ doc_embs.T

    # BM25 lexical similarity over whitespace-tokenized text.
    tokenized_docs = [doc.split(" ") for doc in docs]
    tokenized_query = query.split(" ")
    bm25 = BM25Okapi(tokenized_docs)
    bm25_similarity = bm25.get_scores(tokenized_query)

    # Weighted fusion of the two signals; with the default weights the dense
    # score dominates and BM25 acts as a small lexical tie-breaker.
    hybrid_similarity = dense_weight * dense_similarity + bm25_weight * bm25_similarity

    # Take the top_k documents by fused score.
    ranked = sorted(zip(docs, hybrid_similarity.tolist()),
                    key=lambda pair: pair[1], reverse=True)
    return [doc for doc, _ in ranked[:top_k]]

# model = BGEM3FlagModel('/sdc/model/BAAI/bge-m3', use_fp16=True)

# query = "windy London"

# docs = [
#     "Hello there good man!",
#     "It is quite windy in London",
#     "How is the weather today?"
# ]

# print(hybrid_search(model=model, query=query, docs=docs, top_k=2))