"""
@Description :   Object Detect 任务评价指标 Handler
@Author      :   tqychy 
@Time        :   2025/08/28 11:25:09
"""

import sys

import numpy as np
from tools import *

sys.path.append("./metrics")


class DetectMetricsHandler:
    """
    Metrics handler for object-detection evaluation.

    Accumulates per-image predictions and ground truths via update() and
    computes mAP (at each configured IoU threshold) plus per-class and
    overall precision/recall on demand via get_metrics().
    """

    def __init__(self, *args, iou_thresholds=None):
        """
        Args:
            *args: exactly two positional arguments, (cfg, logger).
            iou_thresholds: IoU thresholds used for mAP evaluation;
                defaults to [0.5, 0.75, 0.9]. A None sentinel is used
                instead of a list literal to avoid the shared
                mutable-default-argument pitfall.
        """
        self.cfg, self.logger = args
        # Copy to avoid aliasing the caller's list.
        self.iou_thresholds = [0.5, 0.75, 0.9] if iou_thresholds is None else list(iou_thresholds)
        # Per-image accumulators; each entry holds one image's detections.
        self.predictions = []
        self.ground_truths = []
        self.classes = set()

        # Cached metrics dict; None means "stale, recompute on demand".
        self.metrics = None

    def update(self, pred_bboxes, pred_categories, scores, gt_bboxes, gt_categories):
        """
        Record one image's predictions and ground truths, and extend the
        set of known class names.

        Args:
            pred_bboxes: predicted bounding boxes [x_min, y_min, width, height]
            pred_categories: predicted class names
            scores: prediction confidence scores
            gt_bboxes: ground-truth bounding boxes [x_min, y_min, width, height]
            gt_categories: ground-truth class names
        """
        # Invalidate the cache only; metrics are recomputed lazily in
        # get_metrics(). (The original recomputed the full metric set on
        # every update, which is quadratic over a dataset.)
        self.metrics = None

        self.predictions.append({
            'bboxes': np.array(pred_bboxes),
            'categories': np.array(pred_categories),
            'scores': np.array(scores)
        })
        self.ground_truths.append({
            'bboxes': np.array(gt_bboxes),
            'categories': np.array(gt_categories)
        })
        # Track every class name seen so far, predicted or ground truth.
        self.classes.update(pred_categories)
        self.classes.update(gt_categories)

    def get_metrics(self):
        """
        Compute (or return cached) evaluation metrics.

        Returns:
            dict mapping metric names (e.g. 'mAP@0.5', 'Precision_<cls>',
            'Recall_all') to float values.
        """
        if self.metrics is not None:
            return self.metrics

        metrics = {}

        # mAP at every configured IoU threshold. NOTE: `map` here is the
        # mAP helper from `tools` (star import), which shadows the
        # builtin `map` — it is expected to return a dict of scores.
        map_scores = map(self.predictions, self.ground_truths,
                         self.classes, self.iou_thresholds)
        metrics.update(map_scores)

        # Per-class precision/recall at the conventional IoU = 0.5.
        precision, recall = precision_recall(
            self.predictions, self.ground_truths, self.classes, 0.5)
        for c in self.classes:
            metrics[f'Precision_{c}'] = np.mean(
                precision[c]) if precision[c] else 0
            metrics[f'Recall_{c}'] = np.mean(recall[c]) if recall[c] else 0
        metrics['Precision_all'] = np.mean(
            [metrics[f'Precision_{c}'] for c in self.classes])
        metrics['Recall_all'] = np.mean(
            [metrics[f'Recall_{c}'] for c in self.classes])

        # Cache until the next update()/reset() invalidates it.
        self.metrics = metrics
        return metrics

    def desc_metrics(self):
        """
        Compact summary for a progress-bar postfix.

        Returns:
            (mAP@0.5, mean of overall precision and recall)
        """
        metrics = self.get_metrics()
        # NOTE(review): assumes 0.5 is always among iou_thresholds —
        # confirm against configuration.
        return metrics["mAP@0.5"], (metrics["Precision_all"] + metrics["Recall_all"]) / 2

    def display(self):
        """
        One-line summary string for test-time logging.
        """
        metrics = self.get_metrics()
        # Join fields with ", " — the original concatenated them with no
        # separator, producing unreadable log lines.
        parts = [f"mAP@{iou_th}: {metrics[f'mAP@{iou_th}'] * 100:.2f}%"
                 for iou_th in self.iou_thresholds]
        parts.append(f"Precision_all: {metrics['Precision_all'] * 100:.2f}%")
        parts.append(f"Recall_all: {metrics['Recall_all'] * 100:.2f}%")
        return ", ".join(parts) + "\n"

    def display_all(self):
        """
        Full dump of every metric, one per line.
        """
        metrics = self.get_metrics()
        return "".join(f"{k}: {v}\n" for k, v in metrics.items())

    def reset(self):
        """Clear stored predictions, ground truths, classes and the cache."""
        self.predictions = []
        self.ground_truths = []
        self.classes = set()
        # Also drop the cache — the original left stale metrics behind,
        # so get_metrics() after reset() could report the previous run.
        self.metrics = None
