import numpy as np
from sklearn.metrics import ndcg_score

# Recall measures the model's ability to retrieve the actual relevant documents.
def calc_recall(preds, truths, cutoffs):
    """Return capped recall@c for each cutoff, averaged over all queries.

    The denominator is min(c, |truth|) (floored at 1) so that recall@c can
    reach 1.0 even when a query has more relevant documents than the cutoff.
    """
    recalls = np.zeros(len(cutoffs))
    for ranked, relevant in zip(preds, truths):
        for i, c in enumerate(cutoffs):
            # np.intersect1d yields the set intersection of the two id lists.
            hit_count = len(np.intersect1d(relevant, ranked[:c]))
            recalls[i] += hit_count / max(min(c, len(relevant)), 1)
    return recalls / len(preds)

def calc_MRR(preds, truth, cutoffs):
    """Mean Reciprocal Rank (MRR) at each cutoff.

    MRR measures the rank position of the first relevant result: each query
    contributes 1/rank of its first relevant document when that rank lies
    within the cutoff, and 0 otherwise; results are averaged over queries.
    """
    mrr = [0.0] * len(cutoffs)
    for ranked, relevant in zip(preds, truth):
        # 1-based rank of the first relevant document, or None if absent.
        first_hit = next(
            (rank for rank, doc in enumerate(ranked, start=1) if doc in relevant),
            None,
        )
        if first_hit is None:
            continue
        for i, c in enumerate(cutoffs):
            if first_hit <= c:
                mrr[i] += 1.0 / first_hit
    return [total / len(preds) for total in mrr]

"""
Normalized Discounted Cumulative Gain (nDCG) measures the quality of a ranked list of search results by considering both the position of the relevant documents and their graded relevance scores. The calculation of nDCG involves two main steps

1. Discounted cumulative gain (DCG) measures the ranking quality in retrieval tasks
2. Normalized by ideal DCG to make it comparable across queries

where IDCG is the maximum possible DCG for a given set of documents, assuming they are perfectly ranked in order of relevance.

标准化折扣累积增益 （nDCG） 通过考虑相关文档的位置及其分级相关性分数来衡量搜索结果排名列表的质量。nDCG 的计算涉及两个主要步骤

1. 折扣累积增益 （DCG） 衡量检索任务中的排名质量
2. 按理想 DCG 进行规范化，使其在查询之间具有可比性

其中 IDCG 是给定文档集的最大可能 DCG，假设它们按相关性顺序完美排序。
"""
def calc_nDCG(results, ground_truth, cutoffs):
    """Print and return nDCG@c for each cutoff, averaged over all queries.

    DCG@c  = sum_{i=1..c} rel_i / log2(i + 1), taken over the ranked results
    IDCG@c = DCG of an ideally ranked list (all relevant documents first)
    nDCG@c = DCG@c / IDCG@c  (0 when the query has no relevant documents)

    Bug fix: the previous implementation passed the raw document-id lists to
    sklearn's ndcg_score as the *score* argument, which made ndcg_score
    re-rank every result list by document id instead of by the actual
    retrieval order. The metric is now computed directly from the ranking.
    """
    ndcgs = []
    for c in cutoffs:
        total = 0.0
        for pred, label in zip(results, ground_truth):
            # Binary relevance of the top-c results, in ranked order.
            rel = np.isin(pred[:c], label).astype(float)
            discounts = 1.0 / np.log2(np.arange(2, len(rel) + 2))
            dcg = float(np.sum(rel * discounts))
            # Ideal DCG: all min(|label|, c) relevant docs occupy the top ranks.
            n_ideal = min(len(label), c)
            idcg = float(np.sum(1.0 / np.log2(np.arange(2, n_ideal + 2))))
            total += dcg / idcg if idcg > 0 else 0.0
        ndcg = total / len(results)
        ndcgs.append(ndcg)
        print(f"nDCG@{c}: {ndcg}")
    return ndcgs

# Precision: in information retrieval, the ratio of relevant documents
# retrieved to the total number of documents retrieved.
def calc_precision(preds, truths, cutoffs):
    """Return precision@c for each cutoff, averaged over all queries."""
    prec = np.zeros(len(cutoffs))
    for ranked, relevant in zip(preds, truths):
        for i, c in enumerate(cutoffs):
            # Relevant documents appearing within the top-c results.
            relevant_retrieved = np.intersect1d(relevant, ranked[:c])
            prec[i] += len(relevant_retrieved) / c
    return prec / len(preds)

"""
Mean Average Precision (MAP) measures the effectiveness of a system at returning relevant documents across multiple queries

First, Average Precision (AP) evaluates how well relevant documents are ranked within the retrieved documents. It is computed by averaging the precision values at each position where a relevant document appears in the ranking of all the retrieved documents
```math
\textbf{AP}=\frac{\sum_{k=1}^{M}\text{Relevance}(k) \times \text{Precision}(k)}{|\{\text{Relevant Docs}\}|}
```
where

 M is the total number of documents retrieved.
 Relevance(K) is a binary value, indicating whether document at position K is relevant (=1) or not (=0).
 Precision(K) is the precision when considering only top K retrieved items.

Then calculate the average AP across multiple queries to get the MAP
```math
\textbf{MAP}=\frac{1}{N}\sum_{i=1}^{N}\text{AP}_i
```
"""
def calc_AP(encoding):
    """Average Precision of one binary relevance encoding (1 = relevant).

    Averages precision@k over every rank k that holds a relevant document;
    returns 0 when the encoding contains no relevant documents.
    """
    hits = 0
    precision_sum = 0.0
    for rank, is_relevant in enumerate(encoding, start=1):
        if is_relevant == 1:
            hits += 1
            precision_sum += hits / rank
    if hits == 0:
        return 0
    return precision_sum / hits

def calc_MAP(encodings, cutoffs):
    """Mean Average Precision at each cutoff.

    Truncates every relevance encoding to the cutoff, computes its AP, and
    averages the APs over all queries.
    """
    return [
        sum(calc_AP(encoding[:c]) for encoding in encodings) / len(encodings)
        for c in cutoffs
    ]


# Evaluation-metric demo.
if __name__ == '__main__':
    # Assume a corpus whose document ids range from 0 to 30.
    # Ground-truth relevant document ids for each query.
    ground_truth = [
        [11,  1,  7, 17],
        [ 4, 16,  1, 21],
        [26, 10, 22,  8],
    ]

    # Search results returned by some retrieval system for each query.
    results = [
        [11,  1, 17,  7, 21,  8,  0, 28,  9, 20],
        [16,  1,  6, 18,  3,  4, 25, 19,  8, 14],
        [24, 10, 26,  2,  8, 28,  4, 23, 13, 21],
    ]

    # Cutoffs we are interested in.
    cutoffs = [1, 5, 10]

    recalls = calc_recall(results, ground_truth, cutoffs)
    for i, c in enumerate(cutoffs):
        print(f"recall@{c}: {recalls[i]}")

    mrr = calc_MRR(results, ground_truth, cutoffs)
    for i, c in enumerate(cutoffs):
        print(f"MRR@{c}: {mrr[i]}")

    # calc_nDCG prints its own nDCG@c lines.
    calc_nDCG(results, ground_truth, cutoffs)

    precisions = calc_precision(results, ground_truth, cutoffs)
    for i, c in enumerate(cutoffs):
        print(f"precision@{c}: {precisions[i]}")

    # Binary relevance encoding of each result list (1 = the retrieved
    # document is relevant), as required by MAP.
    pred_hard_encodings = [
        list(np.isin(pred, label).astype(int))
        for pred, label in zip(results, ground_truth)
    ]
    maps = calc_MAP(pred_hard_encodings, cutoffs)
    for i, c in enumerate(cutoffs):
        print(f"MAP@{c}: {maps[i]}")