import faiss
from tqdm import tqdm

from datasets import load_dataset
import numpy as np

import os
import sys

# Make the parent directory importable so the project-local `my_common` helpers
# resolve no matter where this script is launched from.
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(current_dir)
print(f"current_dir: {current_dir}")
sys.path.insert(0, parent_dir)
from my_common import  load_flag_model, load_flag_reranker_large_model


"""
Reranker is designed in cross-encoder architecture that takes the query and text at the same time and directly output their score of similarity. It is more capable of scoring the query-text relevance, but with the tradeoff of slower speed. Thus, a complete retrieval system usually contains retrievers in the first stage to do a large scope retrieval, and then followed by rerankers to rerank the results more precisely.
Reranker 采用跨编码器架构设计，同时获取查询和文本，并直接输出它们的相似度分数。它更有能力对查询文本相关性进行评分，但代价是速度较慢。因此，一个完整的检索系统通常在第一阶段包含 retrievers 以进行大范围检索，然后是 rerankers 以更精确地对结果进行重新排序。

In this tutorial, we will go through text retrieval pipeline with reranker and evaluate the results before and after reranking.
在本教程中，我们将使用 reranker 遍历文本检索管道，并评估重新排序前后的结果。

Note: Steps 1-4 are identical to the evaluation tutorial. If you are not familiar with retrieval, we suggest going through that tutorial first.
注意：步骤 1-4 与评估教程相同。如果您不熟悉检索，我们建议您先学习该教程。
"""

def calc_recall(preds, truths, cutoffs):
    """Compute mean recall@k over all queries for each cutoff.

    Args:
        preds: per-query ranked lists of retrieved texts.
        truths: per-query lists of ground-truth (relevant) texts.
        cutoffs: list of k values, e.g. [1, 10].

    Returns:
        np.ndarray of shape (len(cutoffs),) with the mean recall at each cutoff.
    """
    recalls = np.zeros(len(cutoffs))
    for pred, truth in zip(preds, truths):
        for i, c in enumerate(cutoffs):
            hits = np.intersect1d(truth, pred[:c])
            # recall@c = |relevant items in top c| / min(c, |relevant|).
            # max(..., 1) guards against queries with an empty truth list.
            # (The previous denominator, min(len(hits), len(truth)), always
            # equalled len(hits) because hits is a subset of truth, collapsing
            # the metric to a 0/1 hit indicator instead of true recall.)
            recalls[i] += len(hits) / max(min(c, len(truth)), 1)
    recalls /= len(preds)
    return recalls

def MRR(preds, truth, cutoffs):
    """Compute Mean Reciprocal Rank (MRR) at each cutoff.

    For every query, the reciprocal of the 1-based rank of the first relevant
    result is credited to each cutoff that rank falls within; queries with no
    relevant result inside a cutoff contribute 0 to it.

    Args:
        preds: per-query ranked lists of retrieved texts.
        truth: per-query lists of ground-truth (relevant) texts.
        cutoffs: list of k values, e.g. [1, 10].

    Returns:
        list of floats, the mean reciprocal rank at each cutoff.
    """
    totals = [0.0] * len(cutoffs)
    for ranked, relevant in zip(preds, truth):
        # 1-based rank of the first relevant item, or None if there is none.
        first_hit = next(
            (rank for rank, item in enumerate(ranked, start=1) if item in relevant),
            None,
        )
        if first_hit is None:
            continue
        for i, c in enumerate(cutoffs):
            if first_hit <= c:
                totals[i] += 1.0 / first_hit
    return [t / len(preds) for t in totals]


if __name__ == "__main__":
    # 1. Dataset
    # Load the locally cached MS MARCO data (expected under ./hf-data next to
    # this script) and keep a small slice for the demo.
    print("----------------- Loading the MS Marco dataset... -----------------")
    data = load_dataset(f"{current_dir}/hf-data", split="validation")

    # First 100 queries; each row's "positive" field is a list of relevant
    # passages, so flatten the first 5000 rows' positives into one corpus list.
    queries = np.array(data[:100]["query"])
    corpus = sum(data[:5000]["positive"], [])

    # 2. Embedding
    # Load the BGE embedding model via the project helper; the instruction is
    # prepended to queries only (BGE convention), not to corpus passages.
    print("----------------- Embedding... -----------------")
    model = load_flag_model(query_instruction_for_retrieval="Represent this sentence for searching relevant passages:")

    # Encode the whole corpus into dense vectors.
    corpus_embeddings = model.encode(corpus)

    print("shape of the corpus embeddings:", corpus_embeddings.shape)
    print("data type of the embeddings: ", corpus_embeddings.dtype)

    # 3. Indexing
    # Embedding dimensionality (e.g. 768 for bge-base-en-v1.5).
    print("----------------- embedding Indexing... -----------------")
    dim = corpus_embeddings.shape[-1]

    # Build an exact (brute-force) inner-product index. Faiss requires
    # float32 input. NOTE: train() is a no-op for a Flat index but is kept
    # for uniformity with trainable index types.
    index = faiss.index_factory(dim, 'Flat', faiss.METRIC_INNER_PRODUCT)
    corpus_embeddings = corpus_embeddings.astype(np.float32)
    index.train(corpus_embeddings)
    index.add(corpus_embeddings)

    print(f"total number of vectors: {index.ntotal}")

    # 4. Retrieval
    print("----------------- Retrieval... -----------------")
    query_embeddings = model.encode_queries(queries)
    # Ground truths cover the whole split; later zip() calls against the
    # 100-query predictions truncate this to the first 100 entries.
    ground_truths = [d["positive"] for d in data]
    # As an ndarray the corpus supports fancy indexing by an array of ids.
    corpus = np.asarray(corpus)

    res_scores, res_ids, res_text = [], [], []
    query_size = len(query_embeddings)
    batch_size = 256
    # The cutoffs we will use during evaluation, and set k to be the maximum of the cutoffs.
    cut_offs = [1, 10]
    k = max(cut_offs)

    # Search in batches to bound memory usage; accumulate per-query score,
    # id, and text lists.
    for i in tqdm(range(0, query_size, batch_size), desc="Searching"):
        q_embedding = query_embeddings[i: min(i+batch_size, query_size)].astype(np.float32)
        # search the top k answers for each of the queries
        score, idx = index.search(q_embedding, k=k)
        res_scores += list(score)
        res_ids += list(idx)
        res_text += list(corpus[idx])

    # 5. Reranking
    print("----------------- Reranking... -----------------")
    """
    Now we will use a reranker to rerank the list of answers we retrieved using our index. Hopefully, this will lead to better results.

    The following table lists the available BGE rerankers. Feel free to try out to see their differences!
    """
    reranker = load_flag_reranker_large_model()

    # Sanity check: compute_score() takes [query, passage] pairs and returns
    # one relevance score per pair (higher = more relevant).
    scores = reranker.compute_score([
        ['what is panda?', 'Today is a sunny day'], 
        ['what is panda?', 'The tiger (Panthera tigris) is a member of the genus Panthera and the largest living cat species native to Asia.'],
        ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']
        ])
    print(scores)

    # Now, let's use the reranker to rerank our previously retrieved results:
    new_ids, new_scores, new_text = [], [], []
    for i in range(len(queries)):
        # Re-score every retrieved passage for this query with the cross-encoder.
        new_score = reranker.compute_score([[queries[i], text] for text in res_text[i]])
        # Sort (score, id) pairs descending by score; on score ties, sorted()
        # falls back to comparing the ids.
        new_id = [tup[1] for tup in sorted(list(zip(new_score, res_ids[i])), reverse=True)]
        new_scores.append(sorted(new_score, reverse=True))
        new_ids.append(new_id)
        new_text.append(corpus[new_id])

    # 6. Evaluate
    # For details of these metrics, please check out the tutorial of evaluation.

    # 6.1 Recall 
    # Before reranking:
    print("----------------- Evaluate Recall Before reranking: ----------------- ")
    recalls_init = calc_recall(res_text, ground_truths, cut_offs)
    for i, c in enumerate(cut_offs):
        print(f"recall@{c}:\t{recalls_init[i]}")
    # After reranking:
    print("-----------------Evaluate Recall After reranking: ----------------- ")
    recalls_rerank = calc_recall(new_text, ground_truths, cut_offs)
    for i, c in enumerate(cut_offs):
        print(f"recall@{c}:\t{recalls_rerank[i]}")

    # 6.2 MRR
    # Before reranking:
    print("-----------------Evaluate MRR Before reranking: ----------------- ")
    mrr_init = MRR(res_text, ground_truths, cut_offs)
    for i, c in enumerate(cut_offs):
        print(f"MRR@{c}:\t{mrr_init[i]}")
    # After reranking:
    print("-----------------Evaluate MRR After reranking: ----------------- ")
    mrr_rerank = MRR(new_text, ground_truths, cut_offs)
    for i, c in enumerate(cut_offs):
        print(f"MRR@{c}:\t{mrr_rerank[i]}")

    # 6.3 nDCG
    # Before reranking:
    print("-----------------Evaluate nDCG Before reranking: ----------------- ")
    from sklearn.metrics import ndcg_score

    # Binary relevance labels: 1 where a retrieved text appears in the
    # ground-truth list, 0 otherwise.
    pred_hard_encodings = []
    for pred, label in zip(res_text, ground_truths):
        pred_hard_encoding = list(np.isin(pred, label).astype(int))
        pred_hard_encodings.append(pred_hard_encoding)

    for i, c in enumerate(cut_offs):
        nDCG = ndcg_score(pred_hard_encodings, res_scores, k=c)
        print(f"nDCG@{c}: {nDCG}")

    # After reranking: same encoding, but against the reranked order and the
    # reranker's scores.
    print("-----------------Evaluate nDCG After reranking: ----------------- ")
    pred_hard_encodings_rerank = []
    for pred, label in zip(new_text, ground_truths):
        pred_hard_encoding = list(np.isin(pred, label).astype(int))
        pred_hard_encodings_rerank.append(pred_hard_encoding)

    for i, c in enumerate(cut_offs):
        nDCG = ndcg_score(pred_hard_encodings_rerank, new_scores, k=c)
        print(f"nDCG@{c}: {nDCG}")