# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from utils import EVALUATORS
from .base import BaseRetrievalEvaluator
from collections import defaultdict
from sklearn.metrics import ndcg_score


@EVALUATORS.register_module("topk_recall")
class TopKRecallEvaluator(BaseRetrievalEvaluator):
    """Evaluate retrieval results with top-k recall.

    For each query, recall@k is the number of ground-truth targets found
    among the top-k retrieved candidates, normalised by min(k, number of
    ground-truth targets). Per-query scores are averaged across all queries
    and reported as percentages rounded to 4 decimal places.
    """

    def __init__(self, retrieval_result, **kwargs):
        super().__init__(retrieval_result, **kwargs)
        # Cutoff ranks at which recall is measured.
        self.topk_list = kwargs.get("topk", [1, 5, 10])

    def _single_query_score(self, gt_target_id_list, retrieval_target_list):
        """Compute recall@k for a single query at every configured cutoff.

        ``retrieval_target_list`` holds (target_id, score) pairs ranked by
        relevance; only the ids are needed for recall.
        """
        ranked_ids = [candidate[0] for candidate in retrieval_target_list]
        num_gt = len(gt_target_id_list)
        per_cutoff = {}
        for k in self.topk_list:
            if num_gt > 0:
                hits = len(set(ranked_ids[:k]) & set(gt_target_id_list))
                per_cutoff[f"TopKRecall@{k}"] = float(hits) / float(min(k, num_gt))
            else:
                # No ground truth for this query: score it as zero.
                per_cutoff[f"TopKRecall@{k}"] = 0
        return per_cutoff

    def score(self):
        """Average per-query recall over the dataset, as rounded percentages."""
        accumulated = defaultdict(list)
        for _, gt_target_id_list, retrieval_result_dict in self.retrieval_result:
            for retrieval_target_list in retrieval_result_dict.values():
                per_query = self._single_query_score(gt_target_id_list, retrieval_target_list)
                for metric, value in per_query.items():
                    accumulated[metric].append(value)
        return {
            metric: round(sum(values) / len(values) * 100, 4)
            for metric, values in accumulated.items()
        }


@EVALUATORS.register_module("ndcg_retrieval")
class NDCGRetrievalEvaluator(BaseRetrievalEvaluator):
    """Evaluate retrieval results with NDCG@k via ``sklearn.metrics.ndcg_score``.

    Relevance is binary: a retrieved candidate counts as relevant iff its id
    appears in the query's ground-truth list. Per-query NDCG values are
    averaged across all queries and reported as percentages rounded to
    4 decimal places.

    NOTE(review): ``sklearn.metrics.ndcg_score`` raises a ``ValueError`` when
    a query has fewer than two candidates — confirm upstream always supplies
    longer retrieval lists.
    """

    def __init__(self, retrieval_result, **kwargs):
        super().__init__(retrieval_result, **kwargs)
        # Cutoff ranks at which NDCG is measured.
        self.topk_list = kwargs.get("topk", [1, 5, 10])

    def _single_query_score(self, gt_target_id_list, retrieval_target_list):
        """Compute NDCG@k for a single query at every configured cutoff.

        ``retrieval_target_list`` holds (target_id, score) pairs; the score
        is used as the predicted relevance, and the binary ground-truth
        relevance is derived from membership in ``gt_target_id_list``.
        """
        binary_relevance = [
            1 if candidate[0] in gt_target_id_list else 0
            for candidate in retrieval_target_list
        ]
        predicted_scores = [candidate[1] for candidate in retrieval_target_list]
        return {
            f"NDCG@{k}": ndcg_score([binary_relevance], [predicted_scores], k=k)
            for k in self.topk_list
        }

    def score(self):
        """Average per-query NDCG over the dataset, as rounded percentages."""
        accumulated = defaultdict(list)
        for _, gt_target_id_list, retrieval_result_dict in self.retrieval_result:
            for retrieval_target_list in retrieval_result_dict.values():
                per_query = self._single_query_score(gt_target_id_list, retrieval_target_list)
                for metric, value in per_query.items():
                    accumulated[metric].append(value)
        return {
            metric: round(sum(values) / len(values) * 100, 4)
            for metric, values in accumulated.items()
        }
