"""
@Filename       : metric.py
@Create Time    : 2021/1/14 9:15
@Author         : Rylynn
@Description    : Evaluation metrics (MRR, Hit@k, MAP, log-loss, F1, ...) for ranked predictions.

"""

import numpy as np
import ml_metrics as metrics
import sklearn.metrics as sk_metrics


class Metric:
    """Integer identifiers for the metrics an Evaluator can compute.

    Values are consecutive from 0 and double as dispatch-table keys.
    """

    (MRR, HIT, MAP, ILL, SLL, F1, P, R, AUC) = range(9)


class Evaluator:
    """Computes the requested evaluation metrics over ranked predictions.

    Every private metric method shares the signature (actual, probs, predict)
    and returns either a single ('name', value) pair or a list of
    ('name@k', value) pairs for the configured cutoffs.
    """

    def __init__(self, metric_list, k_list=None):
        """
        :param metric_list: iterable of Metric.* identifiers to evaluate in run().
        :param k_list: rank cutoffs for the @k metrics; defaults to [10, 50, 100].
        """
        if k_list is None:
            k_list = [10, 50, 100]
        self.require_metric = metric_list
        # Dispatch table mapping metric identifier -> bound implementation.
        self.metric_dict = {
            Metric.MRR: self.__mrr,
            Metric.HIT: self.__hit_k,
            Metric.MAP: self.__map_k,
            Metric.ILL: self.__ill,
            Metric.SLL: self.__sll,
            Metric.F1: self.__f1,
            Metric.P: self.__precision,
            Metric.R: self.__recall,
            Metric.AUC: self.__auc,
        }
        self.k = k_list

    def __mrr(self, actual, probs, predict):
        # TODO: mean reciprocal rank is not implemented yet.
        pass

    def __hit_k(self, actual, probs, predict):
        """Hit ratio at each cutoff k.

        For each cutoff in ``self.k``, returns the fraction of samples whose
        ground-truth items include at least one of that sample's own top-k
        predicted items.

        Fixes the original implementation, which never updated ``total``,
        overwrote ``count`` on every cutoff (so only the last k survived),
        and compared each ground-truth entry against the ranking matrix of
        all samples instead of the matching sample's row.

        :return: list of ('hit@k', ratio) pairs, one per cutoff.
        """
        n = len(actual)
        results = []
        for k in self.k:
            # Count samples where any true item appears in that sample's top-k.
            hits = sum(
                1
                for truth, ranked in zip(actual, predict)
                if any(item in ranked[:k] for item in truth)
            )
            # Guard against an empty evaluation set.
            results.append(('hit@{}'.format(k), hits / n if n else 0.0))
        return results

    def __map_k(self, actual, probs, predict):
        """Mean average precision at each configured cutoff."""
        return [('map@{}'.format(k), metrics.mapk(actual, predict, k)) for k in self.k]

    def __ill(self, actual, probs, predict):
        # TODO: not implemented yet.
        pass

    def __sll(self, actual, probs, predict):
        """Log-likelihood of the predictions via ml_metrics.ll."""
        return 'sll', metrics.ll(actual, probs)

    def __f1(self, actual, probs, predict):
        return 'f1', sk_metrics.f1_score(actual, predict)

    def __precision(self, actual, probs, predict):
        # Return a (name, value) pair for consistency with the other metric
        # methods (the original returned the bare score).
        return 'precision', sk_metrics.precision_score(actual, predict)

    def __recall(self, actual, probs, predict):
        return 'recall', sk_metrics.recall_score(actual, predict)

    def __auc(self, actual, probs, predict):
        return 'auc', metrics.auc(actual, predict)

    def __run_metric(self, metric, actual, probs, predict):
        """Look up and invoke the implementation for one metric identifier."""
        return self.metric_dict[metric](actual, probs, predict)

    def run(self, actual, probs):
        """Evaluate all requested metrics on one batch of predictions.

        :param actual: per-sample ground-truth item lists (or label arrays,
            for the sklearn-backed metrics).
        :param probs: 2-D array of per-sample item scores; higher is better.
        :return: dict mapping metric identifier -> that metric's result.
        """
        result_dict = {}
        # Rank items per sample in descending score order.
        predict = np.argsort(-probs)
        for metric in self.require_metric:
            result_dict[metric] = self.__run_metric(metric, actual, probs, predict)
        print(list(result_dict.values()))
        return result_dict


def main():
    """Demo: compute Hit@k and the log-likelihood metric on toy data."""
    scores = np.array([[0.1, 0.2, 0.3, 0.4]] * 4)
    ground_truth = [[3], [1], [3], [0]]
    Evaluator([Metric.HIT, Metric.SLL]).run(ground_truth, scores)


if __name__ == '__main__':
    main()
