import os
import sys

# Make the parent directory importable so that `my_common` resolves when
# this script is executed directly from its own folder.
_script_path = os.path.abspath(__file__)
current_dir = os.path.dirname(_script_path)
parent_dir = os.path.dirname(current_dir)
print(f"current_dir: {current_dir}")
sys.path.insert(0, parent_dir)


from my_common import get_langchain_chat_openai,load_flag_model

"""
Hard Negatives
Hard negatives are those negative samples that are particularly challenging for the model to distinguish from the positive ones. They are often close to the decision boundary or exhibit features that make them highly similar to the positive samples. Thus hard negative mining is widely used in machine learning tasks to make the model focus on subtle differences between similar instances, leading to better discrimination.
硬负值是那些负值样本，对于模型来说，它们特别难以与正值区分开来。它们通常接近决策边界或表现出使其与正样本高度相似的特征。因此，硬负挖掘被广泛用于机器学习任务中，使模型专注于相似实例之间的细微差异，从而获得更好的区分能力。

In a text retrieval system, a hard negative could be a document that shares some surface-level similarity with the query but does not truly satisfy the query's intent. During retrieval, such documents may rank higher than the real answers. Thus it is valuable to explicitly train the model on these hard negatives.
在文本检索系统中，硬否定可以是与查询具有某些特征相似性但并不真正满足查询意图的文档。在检索过程中，这些文档的排名可能高于真实答案。因此，在这些硬负值样本上显式训练模型是有价值的。
"""
if __name__ == "__main__":
    model = load_flag_model()
    # Load the corpus and queries splits of the BeIR/scifact dataset
    # (stored locally next to this script).
    from datasets import load_dataset

    corpus = load_dataset(f"{current_dir}/BeIR/scifact", "corpus")["corpus"]
    queries = load_dataset(f"{current_dir}/BeIR/scifact", "queries")["queries"]

    corpus_ids = corpus.select_columns(["_id"])["_id"]
    corpus = corpus.select_columns(["text"])["text"]
    # Map the auto-generated FAISS row ids (0-based insertion order) back to
    # the dataset's original document ids for later lookup.
    corpus_ids_map = dict(enumerate(corpus_ids))

    """
    2. Indexing
    Use the embedding model to encode the corpus passages.
    """
    p_vecs = model.encode(corpus)
    print("p_vecs.shape : ")
    print(p_vecs.shape)

    # Build a FAISS index over the passage embeddings.
    import torch, faiss
    import numpy as np

    # Flat inner-product index whose dimension matches the embedding size.
    index = faiss.IndexFlatIP(len(p_vecs[0]))
    # FAISS requires float32 vectors.
    p_vecs = np.asarray(p_vecs, dtype=np.float32)
    # If GPUs are available, shard the index across all of them (float16
    # storage) to accelerate search.
    if torch.cuda.is_available():
        co = faiss.GpuMultipleClonerOptions()
        co.shard = True
        co.useFloat16 = True
        index = faiss.index_cpu_to_all_gpus(index, co=co)
    # Add all passage embeddings to the index.
    index.add(p_vecs)

    """
    3. Searching
    For demonstration, use a single query.
    """
    query = queries[0]
    print(f"Query:\n {query}")

    # Get the id and text of the query, then embed it. encode_queries()
    # applies the model's query-side processing (e.g. instruction prefix).
    q_id, q_text = query["_id"], query["text"]
    q_vec = model.encode_queries(queries=q_text)

    # Retrieve the 15 nearest passages for the query.
    _, ids = index.search(np.expand_dims(q_vec, axis=0), k=15)
    # Convert the FAISS row ids back to ids in the original dataset.
    converted = [corpus_ids_map[row_id] for row_id in ids[0]]
    print(f"Top 15 results:\n {converted}")

    qrels = load_dataset(f"{current_dir}/BeIR/scifact-qrels")["train"]
    # FIX: the original code took qrels[0] and assumed it was the relevance
    # judgment for queries[0]. Instead, collect ALL positive corpus ids
    # annotated for this query id, so no true positive leaks into the
    # negative set.
    pos_ids = {int(row["corpus-id"]) for row in qrels if str(row["query-id"]) == str(q_id)}
    print(f"Positive id:\n {pos_ids}")

    # Use the "top-k shifted by N" method: take the candidates ranked after
    # position 5 (here, 10 of the top 15), excluding every known positive,
    # as hard negatives.
    negatives = [doc_id for doc_id in converted[5:] if int(doc_id) not in pos_ids]
    print(f"Negatives:\n {negatives}")

    """
    Now we have selected a group of hard negatives for the first query!

    There are other ways to refine the selection of hard negatives. For
    example, the implementation in our GitHub repo takes the top 200 shifted
    by 10 (i.e. ranks 10-210) and then samples 15 of those 200 candidates.
    The reason is that directly choosing the top K may introduce false
    negatives -- passages that are somewhat related to the query but not
    actually answers to it -- into the negative set, which can hurt the
    model's performance.
    """