#!/usr/bin/env python3
import pickle
import json
import numpy as np
from collections import defaultdict
import argparse

def load_result_pkl(result_path):
    """Load a pickled results file (e.g. an MMDetection result.pkl).

    NOTE: pickle.load can execute arbitrary code — only use trusted files.
    """
    with open(result_path, 'rb') as fp:
        return pickle.load(fp)

def load_coco_annotations(annotation_path):
    """Parse a COCO-format annotation JSON file and return it as a dict."""
    with open(annotation_path, 'r') as fp:
        return json.load(fp)

def load_classes(classes_path):
    """Read class names from a text file, one name per line (whitespace-stripped)."""
    with open(classes_path, 'r') as fp:
        return [name.strip() for name in fp]

def calculate_iou(box1, box2):
    """Return the intersection-over-union of two [x1, y1, x2, y2] boxes."""
    inter_x1 = max(box1[0], box2[0])
    inter_y1 = max(box1[1], box2[1])
    inter_x2 = min(box1[2], box2[2])
    inter_y2 = min(box1[3], box2[3])

    # Boxes that do not overlap have IoU 0 by definition.
    if inter_x2 <= inter_x1 or inter_y2 <= inter_y1:
        return 0.0

    inter_area = (inter_x2 - inter_x1) * (inter_y2 - inter_y1)
    union_area = ((box1[2] - box1[0]) * (box1[3] - box1[1])
                  + (box2[2] - box2[0]) * (box2[3] - box2[1])
                  - inter_area)
    return inter_area / union_area if union_area > 0 else 0.0

def convert_coco_to_ground_truth(annotations, classes):
    """Convert COCO-format annotations into a per-image ground-truth dict.

    Args:
        annotations: parsed COCO dict with an 'annotations' list (and
            optionally an 'images' list).
        classes: list of class names; annotations whose mapped index falls
            outside this list are dropped.

    Returns:
        dict mapping image_id -> {'boxes': [[x1, y1, x2, y2], ...],
        'labels': [class_idx, ...]}.  Images listed in 'images' but without
        annotations get empty lists.
    """
    # Group annotations by their image id.
    image_annotations = defaultdict(list)
    for ann in annotations['annotations']:
        image_annotations[ann['image_id']].append(ann)

    ground_truth = {}

    # Fix: seed an empty entry for every listed image.  Previously images
    # with zero annotations were absent from the result, so downstream
    # metric code silently skipped them and predictions on them were never
    # counted as false positives.
    for img in annotations.get('images', []):
        ground_truth[img['id']] = {'boxes': [], 'labels': []}

    for img_id, anns in image_annotations.items():
        gt_boxes = []
        gt_labels = []
        for ann in anns:
            # NOTE(review): assumes COCO category ids are contiguous and
            # start at 1 (standard for custom datasets exported this way);
            # verify against the dataset's 'categories' list.
            category_id = ann['category_id'] - 1
            if category_id < len(classes):
                # Convert bbox format: COCO [x, y, w, h] -> [x1, y1, x2, y2].
                x, y, w, h = ann['bbox']
                gt_boxes.append([x, y, x + w, y + h])
                gt_labels.append(category_id)

        ground_truth[img_id] = {'boxes': gt_boxes, 'labels': gt_labels}

    return ground_truth

def _to_plain_list(value):
    """Convert a tensor/ndarray-like value to a plain Python list.

    Torch tensors are moved to host memory first; anything else is expected
    to expose .tolist() (e.g. a numpy array).
    """
    if hasattr(value, 'cpu'):
        return value.cpu().numpy().tolist()
    return value.tolist()


def _extract_field(instances, name):
    """Fetch field `name` from pred_instances as a plain list ([] if absent).

    Supports both attribute-style containers (mmengine InstanceData) and
    plain dicts.
    """
    if hasattr(instances, name):
        return _to_plain_list(getattr(instances, name))
    if name in instances:
        return _to_plain_list(instances[name])
    return []


def extract_predictions(results, confidence_threshold=0.3):
    """Extract per-image predictions from MMDetection results, keeping only
    detections whose score is at or above `confidence_threshold`.

    Args:
        results: iterable of result dicts, each optionally holding 'img_id'
            and 'pred_instances' (with labels/bboxes/scores fields).
        confidence_threshold: minimum confidence for a detection to be kept.

    Returns:
        dict mapping img_id -> {'labels': [...], 'boxes': [...],
        'scores': [...]}.
    """
    predictions = {}

    for result in results:
        img_id = result.get('img_id', 0)

        if 'pred_instances' not in result:
            predictions[img_id] = {'labels': [], 'boxes': [], 'scores': []}
            continue

        instances = result['pred_instances']
        labels = _extract_field(instances, 'labels')
        bboxes = _extract_field(instances, 'bboxes')
        scores = _extract_field(instances, 'scores')

        # Guard against ragged fields: truncate all three to the shortest.
        count = min(len(labels), len(bboxes), len(scores))

        kept = [(labels[i], bboxes[i], scores[i])
                for i in range(count)
                if scores[i] >= confidence_threshold]

        predictions[img_id] = {
            'labels': [item[0] for item in kept],
            'boxes': [item[1] for item in kept],
            'scores': [item[2] for item in kept],
        }

    return predictions

def _match_boxes(pred_boxes, gt_boxes, iou_threshold):
    """Greedily match scored predictions to GT boxes; return (tp, fp, fn).

    Predictions are processed in descending confidence order; each GT box
    can be matched at most once.
    """
    tp = 0
    fp = 0
    matched = [False] * len(gt_boxes)

    for pred_box, _score in sorted(pred_boxes, key=lambda p: p[1], reverse=True):
        best_iou = 0.0
        best_idx = -1
        for gt_idx, gt_box in enumerate(gt_boxes):
            if matched[gt_idx]:
                continue
            try:
                iou = calculate_iou(pred_box, gt_box)
            except Exception as e:
                print(f"计算IoU时出错: pred_box={pred_box}, gt_box={gt_box}, 错误={e}")
                continue
            if iou > best_iou:
                best_iou = iou
                best_idx = gt_idx

        if best_iou >= iou_threshold and best_idx >= 0:
            matched[best_idx] = True
            tp += 1
        else:
            fp += 1

    fn = matched.count(False)
    return tp, fp, fn


def calculate_metrics_by_class(predictions, ground_truth, classes, iou_threshold=0.5):
    """Compute per-class precision/recall/F1 via greedy IoU matching.

    Args:
        predictions: img_id -> {'labels', 'boxes', 'scores'}.
        ground_truth: img_id -> {'labels', 'boxes'}.
        classes: list of class names; index doubles as the label id.
        iou_threshold: minimum IoU for a prediction to count as a match.

    Returns:
        dict: class name -> {'tp', 'fp', 'fn', 'precision', 'recall', 'f1'}.
    """
    class_metrics = {
        name: {'tp': 0, 'fp': 0, 'fn': 0,
               'precision': 0.0, 'recall': 0.0, 'f1': 0.0}
        for name in classes
    }

    empty_pred = {'labels': [], 'boxes': [], 'scores': []}
    empty_gt = {'labels': [], 'boxes': []}

    # Fix: iterate the UNION of image ids.  The original looped only over
    # prediction images that also had ground truth, which silently dropped
    # false negatives on images absent from `predictions` and false
    # positives on images absent from `ground_truth`.
    for img_id in set(predictions) | set(ground_truth):
        pred = predictions.get(img_id, empty_pred)
        gt = ground_truth.get(img_id, empty_gt)

        # Group predictions and ground truth by class label.
        pred_by_class = defaultdict(list)
        for label, box, score in zip(pred['labels'], pred['boxes'], pred['scores']):
            pred_by_class[label].append((box, score))

        gt_by_class = defaultdict(list)
        for label, box in zip(gt['labels'], gt['boxes']):
            gt_by_class[label].append(box)

        for class_idx, class_name in enumerate(classes):
            tp, fp, fn = _match_boxes(pred_by_class[class_idx],
                                      gt_by_class[class_idx],
                                      iou_threshold)
            class_metrics[class_name]['tp'] += tp
            class_metrics[class_name]['fp'] += fp
            class_metrics[class_name]['fn'] += fn

    # Derive precision = TP/(TP+FP), recall = TP/(TP+FN) and F1 per class.
    for class_name in classes:
        m = class_metrics[class_name]
        tp, fp, fn = m['tp'], m['fp'], m['fn']
        precision = tp / (tp + fp) if (tp + fp) > 0 else 0.0
        recall = tp / (tp + fn) if (tp + fn) > 0 else 0.0
        f1 = (2 * precision * recall / (precision + recall)
              if (precision + recall) > 0 else 0.0)
        m['precision'] = precision
        m['recall'] = recall
        m['f1'] = f1

    return class_metrics

def main():
    """CLI entry point: load predictions and COCO annotations, then print a
    per-class precision/recall/F1 table plus overall totals."""
    parser = argparse.ArgumentParser(description='计算每个类别的准确率和召回率')
    parser.add_argument('--result_pkl', type=str,
                       default='/disk2/xd/project/mmdetection/work_dirs/visual/result.pkl',
                       help='预测结果pkl文件路径')
    parser.add_argument('--annotation_json', type=str,
                       default='/disk2/xd/project/mmdetection/data/coco/annotations/instances_val2017.json',
                       help='COCO格式标注文件路径')
    parser.add_argument('--classes_txt', type=str,
                       default='/disk2/xd/project/mmdetection/data/coco/classes.txt',
                       help='类别名称文件路径')
    parser.add_argument('--confidence_threshold', type=float, default=0.3,
                       help='置信度阈值，只计算置信度在此值以上的预测')
    parser.add_argument('--iou_threshold', type=float, default=0.3,
                       help='IoU阈值，用于判断TP/FP')

    args = parser.parse_args()

    print("正在加载数据...")

    # Load inputs; abort with a message on any failure.
    try:
        results = load_result_pkl(args.result_pkl)
        annotations = load_coco_annotations(args.annotation_json)
        classes = load_classes(args.classes_txt)

        print(f"加载了 {len(classes)} 个类别: {classes}")
        print(f"预测结果数量: {len(results)}")
        print(f"标注数据数量: {len(annotations['annotations'])}")
        print(f"置信度阈值: {args.confidence_threshold}")
        print(f"IoU阈值: {args.iou_threshold}")

    except Exception as e:
        print(f"加载数据时出错: {e}")
        return

    # Convert both sides into the common per-image dict format.
    print("\n正在转换数据格式...")
    try:
        ground_truth = convert_coco_to_ground_truth(annotations, classes)
        predictions = extract_predictions(results, args.confidence_threshold)

        print(f"转换后的ground truth数量: {len(ground_truth)}")
        print(f"转换后的预测数量: {len(predictions)}")

        # Count predictions surviving the confidence filter.
        total_predictions_after = 0
        for pred in predictions.values():
            total_predictions_after += len(pred['labels'])

        # Re-extract with a zero threshold to count pre-filter predictions.
        total_predictions_before = 0
        original_predictions = extract_predictions(results, 0.0)
        for pred in original_predictions.values():
            total_predictions_before += len(pred['labels'])

        print(f"过滤前总预测数量: {total_predictions_before}")
        print(f"过滤后总预测数量: {total_predictions_after}")
        print(f"过滤掉的预测数量: {total_predictions_before - total_predictions_after}")
        # Fix: guard against ZeroDivisionError when there are no predictions.
        if total_predictions_before > 0:
            filtered_ratio = (total_predictions_before - total_predictions_after) / total_predictions_before * 100
            print(f"过滤比例: {filtered_ratio:.2f}%")
        else:
            print("过滤比例: 0.00%")

        # Tally ground-truth instances per class.
        gt_class_counts = defaultdict(int)
        for gt in ground_truth.values():
            for label in gt['labels']:
                gt_class_counts[classes[label]] += 1

        print(f"\n真值中各类别数量:")
        for class_name in classes:
            count = gt_class_counts[class_name]
            print(f"{class_name}: {count}")

    except Exception as e:
        print(f"转换数据格式时出错: {e}")
        return

    # Compute and print the per-class metric table.
    print("\n正在计算指标...")
    try:
        class_metrics = calculate_metrics_by_class(predictions, ground_truth, classes, args.iou_threshold)

        print("\n" + "="*80)
        print(f"每个类别的准确率和召回率 (置信度阈值: {args.confidence_threshold}, IoU阈值: {args.iou_threshold}):")
        print("="*80)
        print(f"{'类别':<15} {'准确率':<10} {'召回率':<10} {'F1分数':<10} {'TP':<5} {'FP':<5} {'FN':<5}")
        print("-"*80)

        total_tp = 0
        total_fp = 0
        total_fn = 0

        for class_name in classes:
            metrics = class_metrics[class_name]

            print(f"{class_name:<15} {metrics['precision']:<10.4f} {metrics['recall']:<10.4f} "
                  f"{metrics['f1']:<10.4f} {metrics['tp']:<5} {metrics['fp']:<5} {metrics['fn']:<5}")

            total_tp += metrics['tp']
            total_fp += metrics['fp']
            total_fn += metrics['fn']

        # Micro-averaged overall metrics across all classes.
        overall_precision = total_tp / (total_tp + total_fp) if (total_tp + total_fp) > 0 else 0.0
        overall_recall = total_tp / (total_tp + total_fn) if (total_tp + total_fn) > 0 else 0.0
        overall_f1 = 2 * (overall_precision * overall_recall) / (overall_precision + overall_recall) if (overall_precision + overall_recall) > 0 else 0.0

        print("-"*80)
        print(f"{'总体':<15} {overall_precision:<10.4f} {overall_recall:<10.4f} "
              f"{overall_f1:<10.4f} {total_tp:<5} {total_fp:<5} {total_fn:<5}")
        print("="*80)

    except Exception as e:
        import traceback
        print(f"计算指标时出错: {e}")
        print("详细错误信息:")
        traceback.print_exc()
        return

# Script entry point: run the evaluation only when executed directly.
if __name__ == "__main__":
    main() 