from datasets import load_dataset


import os
import sys

# 设置环境变量 确保 Conda 的库路径优先加载 libstdc++.so.6
# 设置 Conda 环境的库路径
conda_prefix = os.environ.get('CONDA_PREFIX')
if conda_prefix:
    lib_path = os.path.join(conda_prefix, 'lib')
    current_ld_path = os.environ.get('LD_LIBRARY_PATH', '')
    
    # 确保 Conda 库路径在首位
    new_ld_path = f"{lib_path}:{current_ld_path}" if current_ld_path else lib_path
    os.environ['LD_LIBRARY_PATH'] = new_ld_path
    
    # 立即生效（仅对当前进程有效） 
    if 'LD_LIBRARY_PATH' in os.environ:
        try:
            os.add_dll_directory(lib_path)  # Windows 兼容性
        except AttributeError:
            pass  # Unix 系统忽略
    print(f"已设置 LD_LIBRARY_PATH: {new_ld_path}")

# Resolve this script's directory and put its parent on sys.path so sibling
# modules (e.g. my_common, imported below) can be resolved.
_abs_file = os.path.abspath(__file__)
current_dir = os.path.dirname(_abs_file)
parent_dir = os.path.dirname(current_dir)
print(f"current_dir: {current_dir}")
sys.path.insert(0, parent_dir)

from my_common import  BGE_LARGE_ZH_V1_5_MODEL

"""
    2. Search
    Then we prepare a function to encode the text into embeddings and search the results:
    然后我们准备一个函数，将文本编码为嵌入向量并搜索结果：
"""
def search(model, queries_text, corpus_text, batch_size=32, k=100):
    """Encode queries and corpus, retrieve the top-k passages per query with Faiss.

    Args:
        model: an embedding model exposing ``encode_queries`` / ``encode_corpus``
            (e.g. FlagEmbedding's FlagModel).
        queries_text: list of query strings.
        corpus_text: list of passage strings.
        batch_size: number of queries searched per Faiss call (was hard-coded 32).
        k: number of passages retrieved per query (was hard-coded 100).

    Returns:
        dict mapping query id -> {doc id: score}, the format expected by
        ``evaluate_metrics`` / ``evaluate_mrr``.

    NOTE(review): query/doc ids are read from the module-level ``queries`` and
    ``corpus`` datasets (defined under ``__main__``), not from the arguments —
    callers must keep those globals in sync with the texts they pass in.
    """
    import faiss
    import numpy as np
    from tqdm import tqdm

    queries_embeddings = model.encode_queries(queries_text)
    corpus_embeddings = model.encode_corpus(corpus_text)

    # Build a flat (exact) inner-product index over the corpus embeddings.
    dim = corpus_embeddings.shape[-1]
    faiss_index = faiss.index_factory(dim, 'Flat', faiss.METRIC_INNER_PRODUCT)
    corpus_embeddings = corpus_embeddings.astype(np.float32)
    faiss_index.train(corpus_embeddings)  # no-op for 'Flat'; kept for parity with trained index types
    faiss_index.add(corpus_embeddings)

    query_size = len(queries_embeddings)
    all_scores = []
    all_indices = []

    # Search the index in batches of queries.
    for start in tqdm(range(0, query_size, batch_size), desc="Searching"):
        batch = queries_embeddings[start: start + batch_size]  # slicing clamps at the end
        score, indice = faiss_index.search(batch.astype(np.float32), k=k)
        all_scores.append(score)
        all_indices.append(indice)

    all_scores = np.concatenate(all_scores, axis=0)
    all_indices = np.concatenate(all_indices, axis=0)

    # Hoist the id columns once: per-hit dataset column access is expensive.
    query_ids = queries["id"]
    corpus_ids = corpus["id"]

    # Convert raw scores/indices into {qid: {docid: score}} for evaluation.
    # (Renamed the inner loop variable: the original reused ``index``, shadowing
    # the Faiss index created above.)
    results = {}
    for row, (scores, doc_indices) in enumerate(zip(all_scores, all_indices)):
        hits = {}
        for score, doc_idx in zip(scores, doc_indices):
            if doc_idx != -1:  # Faiss pads missing results with -1
                hits[corpus_ids[doc_idx]] = float(score)
        results[query_ids[row]] = hits
    return results

# Local path of the virattt/financial-qa-10K dataset.
# NOTE(review): not referenced anywhere in this file — confirm a sibling
# script uses it before removing.
financial_qa_10K_data=f"{current_dir}/virattt/financial-qa-10K"

# Directory holding the processed fine-tuning splits (test_queries.jsonl,
# corpus.jsonl, test_qrels.jsonl) loaded under __main__ below.
ft_data_dir=f"{current_dir}/ft_data"

if __name__ == '__main__':
    # 1. Load Data — read the processed queries, corpus, and relevance labels.
    queries = load_dataset("json", data_files=f"{ft_data_dir}/test_queries.jsonl")["train"]
    corpus = load_dataset("json", data_files=f"{ft_data_dir}/corpus.jsonl")["train"]
    qrels = load_dataset("json", data_files=f"{ft_data_dir}/test_qrels.jsonl")["train"]

    queries_text = queries["text"]
    # corpus["text"] holds a list of passages per row; flatten to one flat list.
    corpus_text = [text for sub in corpus["text"] for text in sub]

    # Build qid -> {docid: relevance} ground truth for the evaluators.
    qrels_dict = {}
    for line in qrels:
        qrels_dict.setdefault(line['qid'], {})[line['docid']] = line['relevance']

    # 3. Evaluation — compare the original model against the fine-tuned one.
    from FlagEmbedding.abc.evaluation.utils import evaluate_metrics, evaluate_mrr
    from FlagEmbedding import FlagModel
    k_values = [10, 100]

    # NOTE(review): raw_name points at a Chinese (zh) model while the retrieval
    # instruction and the fine-tuned checkpoint name are English — confirm this
    # pairing is intentional.
    raw_name = BGE_LARGE_ZH_V1_5_MODEL
    finetuned_path = f"{current_dir}/test_encoder_only_base_bge-large-en-v1.5"

    def _evaluate_and_report(model_name_or_path, label):
        """Load a FlagModel, run retrieval over the shared corpus, print metrics.

        Extracted because the original duplicated this sequence verbatim for
        the raw and fine-tuned models.
        """
        model = FlagModel(
            model_name_or_path,
            query_instruction_for_retrieval="Represent this sentence for searching relevant passages:",
            devices=[0],
            use_fp16=False
        )
        results = search(model, queries_text, corpus_text)
        eval_res = evaluate_metrics(qrels_dict, results, k_values)
        mrr = evaluate_mrr(qrels_dict, results, k_values)
        print(label)
        for res in eval_res:
            print(res)
        print(mrr)

    # The result for the original model:
    _evaluate_and_report(raw_name, "original model result:")

    # Then the result for the model after fine-tuning:
    _evaluate_and_report(finetuned_path, "fine-tuned model result:")