from sklearn.metrics import roc_curve, roc_auc_score
import numpy as np
import matplotlib.pyplot as plt
import scipy


class my_indicators():
    """Implementations of common ranking / classification evaluation metrics.

    All ranking metrics treat relevance as binary: a candidate item is
    relevant iff it appears in the given ground-truth list.  Lists of
    queries are assumed to be parallel (ranked_list[i] goes with
    ground_truth[i]).
    """

    def __init__(self) -> None:
        pass

    def MRR(self, ranked_list, ground_truth):
        """Mean Reciprocal Rank.

        For each query, find the 1-based rank of the first item of the
        ranked list that appears in that query's ground truth, take its
        reciprocal, and average over all queries.  A query with no hit
        contributes 0.

        Args:
            ranked_list: list of per-query ranked candidate lists.
            ground_truth: list of per-query relevant-item collections,
                parallel to ``ranked_list``.

        Returns:
            float: the mean reciprocal rank.

        Example:
            MRR([[3, 10, 15, 12], [20, 15, 18]], [[12], [15]])
            == (1/4 + 1/2) / 2
        """
        rr = 0.0
        # zip keeps the two lists in lockstep (the old index-based loop
        # raised IndexError when ranked_list was longer than ground_truth).
        for ranked, truth in zip(ranked_list, ground_truth):
            for rank, item in enumerate(ranked, start=1):  # rank is 1-based
                if item in truth:
                    rr += 1.0 / rank
                    break
        return rr / len(ground_truth)

    def auc(self, y_truth, y_pred):
        """Area under the ROC curve (thin wrapper over sklearn).

        Args:
            y_truth: true binary labels.
            y_pred: predicted scores.

        Returns:
            float: the ROC-AUC score.
        """
        return roc_auc_score(y_truth, y_pred)

    def recall_at_k(self, ranked_list, true_list, k):
        """Recall@k: fraction of relevant items found in the top-k results.

        Args:
            ranked_list: ranked candidate items for one query.
            true_list: relevant items; must be non-empty.
            k: number of top-ranked items considered.

        Returns:
            float in [0, 1].
        """
        lth = len(true_list)
        # NOTE(review): assert is stripped under ``python -O``; a
        # ValueError would be safer, but callers may rely on AssertionError.
        assert lth != 0
        hits = sum(1 for item in ranked_list[:k] if item in true_list)
        return hits / lth

    def ndcg_at_k(self, ranked_list, true_list, k):
        """NDCG@k with binary relevance (gain 1 for a hit, 0 otherwise).

        DCG sums 1 / log2(rank + 1) over the hits in the top-k; the ideal
        DCG assumes min(k, len(true_list)) hits in the top positions.

        Args:
            ranked_list: ranked candidate items for one query.
            true_list: relevant items.
            k: number of top-ranked items considered.

        Returns:
            float in [0, 1]; 0 when the ideal DCG is 0 (k <= 0 or no
            relevant items).
        """
        # Ideal DCG: every top position is a hit, capped by the number of
        # relevant items available.  Positions are 0-based, hence i + 2.
        ideal = sum(1.0 / np.log2(i + 2) for i in range(min(k, len(true_list))))
        if ideal == 0:
            return 0
        dcg = sum(1.0 / np.log2(i + 2)
                  for i, item in enumerate(ranked_list[:k])
                  if item in true_list)
        return dcg / ideal

    def precision(self, ranked_list, true_list):
        """Precision treating the first len(true_list) ranked items as the
        predicted positives.

        Args:
            ranked_list: ranked candidate items for one query.
            true_list: relevant items; must be non-empty.

        Returns:
            float in [0, 1].
        """
        lth = len(true_list)
        assert lth != 0
        hits = sum(1 for item in ranked_list[:lth] if item in true_list)
        return hits / lth

    def accuracy(self, ranked_list, true_list):
        """Accuracy treating the first len(true_list) ranked items as
        predicted positives and the rest as predicted negatives.

        With tp = hits among the top-lth items, fp = lth - tp and
        tn = (n - lth) - fp, this returns (tp + tn) / n — algebraically
        identical to the original ``res + (n - lth) - (lth - res)`` form.

        Args:
            ranked_list: ranked candidate items for one query (length n).
            true_list: relevant items; must be non-empty.

        Returns:
            float in [0, 1].
        """
        lth = len(true_list)
        n = len(ranked_list)
        assert lth != 0
        tp = sum(1 for item in ranked_list[:lth] if item in true_list)
        fp = lth - tp
        tn = (n - lth) - fp
        return (tp + tn) / n

    def accuracy_base_auc(self, ranked_list, true_list):
        """Alias for :meth:`accuracy`, kept for backward compatibility.

        The original body was a byte-for-byte duplicate of ``accuracy``;
        it now delegates so the two cannot drift apart.
        """
        return self.accuracy(ranked_list, true_list)


if __name__ == "__main__":
    metrics = my_indicators()

    # Demo inputs: one ranked result list and the relevant items for it.
    demo_ranked = [1, 2, 0, 3]
    demo_truth = [3, 2, 1, 2]

    # Evaluate NDCG over the top-3 positions.
    k = 3
    ndcg = metrics.ndcg_at_k(demo_ranked, demo_truth, k)
    print(f"NDCG@{k}: {ndcg:.4f}")