# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import copy
from sklearn.metrics import roc_auc_score, average_precision_score, ndcg_score, precision_recall_curve, precision_score, recall_score, accuracy_score, f1_score
from .base import BaseClassificationEvaluator
from utils import EVALUATORS


@EVALUATORS.register_module("auc")
class AUCEvaluator(BaseClassificationEvaluator):
    """Area under the ROC curve, reported on a 0-100 scale."""

    def __init__(self, y_truth, y_pred_proba, **kwargs):
        super().__init__(y_truth, y_pred_proba, **kwargs)

    def score(self):
        """Return ``{"AUC": roc_auc * 100}`` rounded to 4 decimal places."""
        value = roc_auc_score(self.y_truth, self.y_pred_proba)
        return {"AUC": round(value * 100, 4)}


@EVALUATORS.register_module("ap")
class APEvaluator(BaseClassificationEvaluator):
    """Average precision (area under the PR curve), reported on a 0-100 scale."""

    def __init__(self, y_truth, y_pred_proba, **kwargs):
        super().__init__(y_truth, y_pred_proba, **kwargs)

    def score(self):
        """Return ``{"AP": average_precision * 100}`` rounded to 4 decimal places."""
        value = average_precision_score(self.y_truth, self.y_pred_proba)
        return {"AP": round(value * 100, 4)}


@EVALUATORS.register_module("ndcg")
class NDCGEvaluator(BaseClassificationEvaluator):
    """Normalized discounted cumulative gain, reported on a 0-100 scale."""

    def __init__(self, y_truth, y_pred_proba, **kwargs):
        super().__init__(y_truth, y_pred_proba, **kwargs)

    def score(self):
        """Return ``{"NDCG": ndcg * 100}`` rounded to 4 decimal places.

        The truth/score vectors are wrapped in an outer list because
        ``ndcg_score`` expects a batch of samples (here: a single query).
        """
        value = ndcg_score([self.y_truth], [self.y_pred_proba])
        return {"NDCG": round(value * 100, 4)}


@EVALUATORS.register_module("target_pr_threshold")
class TargetPRThresholdEvaluator(BaseClassificationEvaluator):
    """Find decision thresholds that achieve a target precision and/or recall.

    At least one of ``target_precision`` / ``target_recall`` must be provided.
    """

    def __init__(self, y_truth, y_pred_proba, target_precision=None, target_recall=None, **kwargs):
        super().__init__(y_truth, y_pred_proba, **kwargs)
        self.target_precision = target_precision  # minimum precision to reach, or None
        self.target_recall = target_recall  # minimum recall to reach, or None
        # NOTE(review): `assert` is stripped under `python -O`; a ValueError would be
        # sturdier here, but changing the raised type could break existing callers.
        assert self.target_precision is not None or self.target_recall is not None

    def score(self):
        """Return a dict with ``target_precision_threshold`` and/or ``target_recall_threshold``.

        Either key defaults to 1.0 when no threshold on the curve meets the target.
        """
        # sklearn returns precision/recall arrays that are one element LONGER than
        # threshold_list (a trailing (precision=1, recall=0) endpoint with no
        # threshold), hence the [:-1] slicing in the first loop below.
        precision_list, recall_list, threshold_list = precision_recall_curve(self.y_truth, self.y_pred_proba)
        th = {}
        if self.target_precision is not None:
            target_precision_threshold = 1.0
            # Walk thresholds from highest to lowest; every hit OVERWRITES the previous
            # one (no break), so the final value is the lowest threshold whose precision
            # still meets the target — the most permissive feasible cutoff.
            for p, r, t in zip(precision_list[:-1][::-1], recall_list[:-1][::-1], threshold_list[::-1]):
                if p >= self.target_precision:
                    target_precision_threshold = t
            th["target_precision_threshold"] = target_precision_threshold

        if self.target_recall is not None:
            target_recall_threshold = 1.0
            # Walk thresholds from lowest to highest (recall is non-increasing in this
            # direction) and stop at the first point where recall falls below the
            # target: the last accepted t is the highest threshold that still meets it.
            for p, r, t in zip(precision_list, recall_list, threshold_list):
                if r < self.target_recall:
                    break
                target_recall_threshold = t
            th["target_recall_threshold"] = target_recall_threshold
        return th


@EVALUATORS.register_module("precision_score")
class PrecisionScoreEvaluator(BaseClassificationEvaluator):
    """Precision at a fixed probability threshold, optionally for the negative class."""

    def __init__(self, y_truth, y_pred_proba, threshold=0.5, reverse=False, metric_name_suffix=None, **kwargs):
        super().__init__(y_truth, y_pred_proba, **kwargs)
        self.threshold = threshold
        self.reverse = reverse
        self.metric_name_suffix = metric_name_suffix

    def score(self):
        """Binarize scores at ``self.threshold`` and return the precision (0-100).

        With ``reverse=True`` the metric is computed for the negative class:
        labels are flipped and the decision rule is inverted.
        """
        labels = copy.deepcopy(self.y_truth)
        proba = np.array(copy.deepcopy(self.y_pred_proba))
        if self.reverse:
            labels = 1 - np.array(labels)
            predictions = proba < self.threshold
        else:
            predictions = proba >= self.threshold
        value = precision_score(labels, predictions)
        name = "Precision_Neg" if self.reverse else "Precision"
        if self.metric_name_suffix:
            name = f"{name}@{self.metric_name_suffix}"
        return {name: round(value * 100, 4)}


@EVALUATORS.register_module("recall_score")
class RecallScoreEvaluator(BaseClassificationEvaluator):
    """Recall at a fixed probability threshold, optionally for the negative class."""

    def __init__(self, y_truth, y_pred_proba, threshold=0.5, reverse=False, metric_name_suffix=None, **kwargs):
        super().__init__(y_truth, y_pred_proba, **kwargs)
        self.threshold = threshold
        self.reverse = reverse
        self.metric_name_suffix = metric_name_suffix

    def score(self):
        """Binarize scores at ``self.threshold`` and return the recall (0-100).

        With ``reverse=True`` the metric is computed for the negative class:
        labels are flipped and the decision rule is inverted.
        """
        labels = copy.deepcopy(self.y_truth)
        proba = np.array(copy.deepcopy(self.y_pred_proba))
        if self.reverse:
            labels = 1 - np.array(labels)
            predictions = proba < self.threshold
        else:
            predictions = proba >= self.threshold
        value = recall_score(labels, predictions)
        name = "Recall_Neg" if self.reverse else "Recall"
        if self.metric_name_suffix:
            name = f"{name}@{self.metric_name_suffix}"
        return {name: round(value * 100, 4)}


@EVALUATORS.register_module("accuracy")
class AccuracyEvaluator(BaseClassificationEvaluator):
    """Accuracy for multi-class probability vectors or scalar binary probabilities.

    Per-sample ndarray/list inputs are converted to labels via argmax; scalar
    float inputs are thresholded at ``threshold`` (strictly greater → positive).
    """

    def __init__(self, y_truth, y_pred_proba, threshold=0.5, **kwargs):
        super().__init__(y_truth, y_pred_proba, **kwargs)
        self.threshold = threshold  # cutoff for scalar (binary) probabilities
        self.y_pred = []  # populated by score(); kept for backward compatibility

    def score(self):
        """Return ``{"accuracy": accuracy * 100}`` rounded to 4 decimal places.

        Bug fix: the original appended into ``self.y_pred``, so calling
        ``score()`` more than once accumulated duplicate predictions and broke
        the metric (length mismatch with ``y_truth``). Predictions are now
        rebuilt from scratch on every call. Also accepts ``np.floating``
        scalars (e.g. np.float32), which are not instances of Python ``float``.
        """
        y_pred = []
        for data in self.y_pred_proba:
            if isinstance(data, (np.ndarray, list)):
                # Multi-class: predicted label is the argmax of the probability vector.
                y_pred.append(int(np.argmax(np.asarray(data))))
            elif isinstance(data, (float, np.floating)):
                y_pred.append(1 if data > self.threshold else 0)
        self.y_pred = y_pred  # replace, never extend
        accuracy = accuracy_score(self.y_truth, y_pred)
        return {"accuracy": round(accuracy * 100, 4)}


@EVALUATORS.register_module("f1")
class F1Evaluator(BaseClassificationEvaluator):
    """F1 score for multi-class probability vectors or scalar binary probabilities.

    Per-sample ndarray/list inputs are converted to labels via argmax; scalar
    float inputs are thresholded at ``threshold`` (strictly greater → positive).
    """

    def __init__(self, y_truth, y_pred_proba, average='macro', threshold=0.5, **kwargs):
        super().__init__(y_truth, y_pred_proba, **kwargs)
        self.threshold = threshold  # cutoff for scalar (binary) probabilities
        self.average = average  # passed through to sklearn's f1_score
        self.y_pred = []  # populated by score(); kept for backward compatibility

    def score(self):
        """Return the F1 score (0-100) keyed by ``"f1"`` (macro) or ``"f1_<average>"``.

        Bug fix: the original appended into ``self.y_pred``, so calling
        ``score()`` more than once accumulated duplicate predictions and broke
        the metric (length mismatch with ``y_truth``). Predictions are now
        rebuilt from scratch on every call. Also accepts ``np.floating``
        scalars (e.g. np.float32), which are not instances of Python ``float``.
        """
        y_pred = []
        for data in self.y_pred_proba:
            if isinstance(data, (np.ndarray, list)):
                # Multi-class: predicted label is the argmax of the probability vector.
                y_pred.append(int(np.argmax(np.asarray(data))))
            elif isinstance(data, (float, np.floating)):
                y_pred.append(1 if data > self.threshold else 0)
        self.y_pred = y_pred  # replace, never extend
        f1 = f1_score(self.y_truth, y_pred, average=self.average)
        score_key = 'f1' if self.average == 'macro' else "f1_{}".format(self.average)
        return {score_key: round(f1 * 100, 4)}
