from __init__ import *
"""模型评估模块"""
import os
import json
import tempfile
import numpy as np
from typing import List, Tuple, Dict, Any
from tqdm import tqdm

from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

from utils.geometry import GeometryUtils

class DetectionEvaluator:
    """Accumulating evaluator for object detection.

    Collects per-image TP/FP/FN counts at a fixed IoU threshold and can
    additionally compute COCO-style mAP over everything accumulated so far
    via ``add_batch``.
    """

    def __init__(self, iou_threshold: float = 0.5):
        # Minimum IoU for a prediction to count as a true positive.
        self.iou_threshold = iou_threshold
        self.reset()

    def reset(self):
        """Clear all accumulated counts and stored detections/annotations."""
        self.total_tp = 0
        self.total_fp = 0
        self.total_fn = 0
        self.all_predictions = []    # one (M, 6) array per image
        self.all_ground_truths = []  # one (N, 5) array per image
        self.image_filenames = []

    def evaluate_single_image(self,
                             predictions: np.ndarray,
                             ground_truths: np.ndarray) -> Tuple[int, int, int]:
        """Match predictions to ground truths for a single image.

        Predictions are greedily matched in descending confidence order.
        For each prediction only the same-class ground truth with the
        highest IoU is considered (VOC-style matching); if that ground
        truth was already claimed by a more confident prediction, the
        prediction counts as a false positive.

        Args:
            predictions: (M, 6) array [x1, y1, x2, y2, score, class_id].
            ground_truths: (N, 5) array [class_id, x1, y1, x2, y2].

        Returns:
            (true_positives, false_positives, false_negatives) as plain
            Python ints.
        """
        if predictions.shape[0] == 0:
            # No predictions: every ground truth is missed.
            return 0, 0, int(ground_truths.shape[0])

        # Process predictions from most to least confident.
        order = predictions[:, 4].argsort()[::-1]
        predictions = predictions[order]

        tp = 0
        pred_matched = np.zeros(predictions.shape[0], dtype=bool)
        gt_matched = np.zeros(ground_truths.shape[0], dtype=bool)

        for i, pred in enumerate(predictions):
            pred_class = int(pred[5])
            pred_box = pred[:4][None]

            # Candidate ground truths: same class only.
            gt_indices = np.where(ground_truths[:, 0] == pred_class)[0]
            if gt_indices.size == 0:
                continue

            gt_boxes = ground_truths[gt_indices][:, 1:5]
            ious = GeometryUtils.calculate_iou(pred_box, gt_boxes)[0]

            # Only the highest-IoU ground truth is considered.
            best = int(np.argmax(ious))
            best_gt = gt_indices[best]
            if ious[best] >= self.iou_threshold and not gt_matched[best_gt]:
                tp += 1
                gt_matched[best_gt] = True
                pred_matched[i] = True

        # Cast to plain int: np.sum would yield np.int64, which leaks into
        # the accumulated counters and breaks json serialization of metrics.
        fp = int(predictions.shape[0] - np.count_nonzero(pred_matched))
        fn = int(np.count_nonzero(~gt_matched))

        return tp, fp, fn

    def add_batch(self,
                  predictions_batch: List[np.ndarray],
                  ground_truths_batch: List[np.ndarray],
                  filenames_batch: List[str]):
        """Evaluate a batch of images and accumulate counts and raw data.

        Args:
            predictions_batch: per-image (M, 6) prediction arrays.
            ground_truths_batch: per-image (N, 5) ground-truth arrays.
            filenames_batch: per-image source filenames.
        """
        for preds, gts, filename in zip(predictions_batch, ground_truths_batch, filenames_batch):
            tp, fp, fn = self.evaluate_single_image(preds, gts)
            self.total_tp += tp
            self.total_fp += fp
            self.total_fn += fn

            # Keep raw arrays so COCO mAP can be computed afterwards.
            self.all_predictions.append(preds)
            self.all_ground_truths.append(gts)
            self.image_filenames.append(filename)

    def get_metrics(self) -> Dict[str, float]:
        """Return precision/recall/F1 computed from the accumulated counts."""
        # The epsilon avoids division by zero when nothing was accumulated.
        precision = self.total_tp / (self.total_tp + self.total_fp + 1e-12)
        recall = self.total_tp / (self.total_tp + self.total_fn + 1e-12)
        f1_score = 2 * precision * recall / (precision + recall + 1e-12)

        return {
            'precision': precision,
            'recall': recall,
            'f1_score': f1_score,
            'true_positives': self.total_tp,
            'false_positives': self.total_fp,
            'false_negatives': self.total_fn
        }

    def calculate_coco_map(self, num_classes: int = 80) -> Tuple[float, float]:
        """Compute (mAP@0.5, mAP@0.5:0.95) over all accumulated images.

        Returns:
            (map_50, map_50_95). Returns (0.0, 0.0) when no detections were
            accumulated, since the COCO API cannot load an empty result list.
        """
        gt_dict, pred_list = self._prepare_coco_format(num_classes)
        if not pred_list:
            # COCO.loadRes raises on an empty detection list.
            return 0.0, 0.0
        return self._evaluate_with_coco_api(gt_dict, pred_list)

    def _prepare_coco_format(self, num_classes: int) -> Tuple[Dict, List]:
        """Convert accumulated data to COCO annotation/result structures.

        Args:
            num_classes: number of category ids (0..num_classes-1).

        Returns:
            (gt_dict, detections) — a COCO ground-truth dict and a list of
            COCO-format detection result dicts. Boxes are converted from
            [x1, y1, x2, y2] to COCO's [x, y, width, height].
        """
        # Image records: ids are the 0-based accumulation order.
        images = []
        for idx, filename in enumerate(self.image_filenames):
            images.append({
                'id': idx,
                'file_name': os.path.basename(filename)
            })

        # Ground-truth annotations.
        annotations = []
        ann_id = 1
        for img_idx, gt in enumerate(self.all_ground_truths):
            for row in gt:
                class_id, x1, y1, x2, y2 = row
                width = x2 - x1
                height = y2 - y1
                annotations.append({
                    'id': ann_id,
                    'image_id': img_idx,
                    'category_id': int(class_id),
                    'bbox': [float(x1), float(y1), float(width), float(height)],
                    'area': float(width * height),
                    'iscrowd': 0
                })
                ann_id += 1

        # Detection results.
        detections = []
        for img_idx, det in enumerate(self.all_predictions):
            for row in det:
                x1, y1, x2, y2, score, class_id = row
                width = x2 - x1
                height = y2 - y1
                detections.append({
                    'image_id': img_idx,
                    'category_id': int(class_id),
                    'bbox': [float(x1), float(y1), float(width), float(height)],
                    'score': float(score)
                })

        # Category records: numeric ids double as names.
        categories = [{'id': i, 'name': str(i)} for i in range(num_classes)]

        gt_dict = {
            'info': {},
            'images': images,
            'annotations': annotations,
            'categories': categories
        }

        return gt_dict, detections

    def _evaluate_with_coco_api(self, gt_dict: Dict, pred_list: List) -> Tuple[float, float]:
        """Run pycocotools bbox evaluation on prepared COCO-format data.

        The COCO API loads from files, so both structures are written to a
        temporary directory that is removed afterwards.
        """
        with tempfile.TemporaryDirectory() as tmpdir:
            gt_file = os.path.join(tmpdir, "annotations.json")
            pred_file = os.path.join(tmpdir, "predictions.json")

            with open(gt_file, 'w') as f:
                json.dump(gt_dict, f)
            with open(pred_file, 'w') as f:
                json.dump(pred_list, f)

            coco_gt = COCO(gt_file)
            coco_dt = coco_gt.loadRes(pred_file)
            coco_eval = COCOeval(coco_gt, coco_dt, 'bbox')

            coco_eval.evaluate()
            coco_eval.accumulate()
            coco_eval.summarize()

            map_50 = coco_eval.stats[1]      # AP at IoU=0.50
            map_50_95 = coco_eval.stats[0]   # AP averaged over IoU=0.50:0.95

        return map_50, map_50_95