#!/usr/bin/env python3
import torch
import numpy as np
from mmdet.apis import init_detector
from mmengine.config import Config
import cv2
from pathlib import Path
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import json
import pickle
from mmengine.dataset import Compose
from mmcv.transforms import LoadImageFromFile
from mmdet.datasets.transforms import Resize, Pad, LoadAnnotations, PackDetInputs

def load_standard_test_results():
    """Load the detection results produced by the standard test.py run.

    Reads the pickled result list from the fixed work directory and returns
    the entry whose ``img_path`` matches the hard-coded target image, or
    ``None`` when no entry matches.
    """
    result_path = '/disk2/xd/project/mmdetection/work_dirs/visual/result.pkl'
    with open(result_path, 'rb') as f:
        all_results = pickle.load(f)

    # Locate the record for the image under comparison.
    target_image = 'data/coco/images/val/scene-1_000003.jpg'
    return next(
        (entry for entry in all_results if entry['img_path'] == target_image),
        None,
    )

def create_standard_test_pipeline():
    """Build a data pipeline identical to the one used by the standard test.py.

    Returns:
        Compose: load -> resize (keep ratio) -> load annotations -> pack,
        with the same meta keys as the standard evaluation config.
    """
    # Transform configuration copied from the standard test.py setup.
    pipeline_cfg = [
        dict(type='LoadImageFromFile'),
        dict(type='Resize', scale=(1333, 750), keep_ratio=True),
        dict(type='LoadAnnotations', with_bbox=True),
        dict(
            type='PackDetInputs',
            meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                       'scale_factor')),
    ]
    return Compose(pipeline_cfg)

def create_custom_forward_with_standard_preprocessing(model):
    """Wrap *model* in a module whose forward mirrors the standard test.py path.

    The wrapper runs the model's own ``data_preprocessor``, backbone, optional
    neck and ``predict_query_head`` so that this custom inference uses exactly
    the same preprocessing as the standard evaluation.

    Args:
        model: An initialized mmdet detector exposing ``data_preprocessor``,
            ``backbone``, optionally ``neck``, and ``predict_query_head``.

    Returns:
        torch.nn.Module: a wrapper whose ``forward`` takes a dict with
        ``'inputs'`` (CHW or NCHW tensor) and ``'data_samples'`` and returns
        ``(bboxes_with_scores, labels)`` shaped [1, 300, 5] and [1, 300].
        (Assumes the head emits exactly 300 queries — TODO confirm against
        the model config.)
    """
    class CustomModel(torch.nn.Module):
        def __init__(self, original_model):
            super().__init__()
            self.model = original_model

        def forward(self, data_samples):
            # Preprocess exactly as the standard test.py does.
            inputs = data_samples['inputs']
            if inputs.dim() == 3:
                inputs = inputs.unsqueeze(0)  # add the batch dimension
            batch_inputs = {'inputs': inputs}
            processed_data = self.model.data_preprocessor(batch_inputs, training=False)

            # Forward through backbone and (if present) neck.
            # BUGFIX: the original accessed ``self.model.neck`` unconditionally
            # before its hasattr() guard, so models without a neck would raise
            # AttributeError despite the guard. getattr with a default makes
            # the neck genuinely optional.
            feat = self.model.backbone(processed_data['inputs'])
            neck = getattr(self.model, 'neck', None)
            if neck is not None:
                feat = neck(feat)

            # Reuse the same image meta info as the standard test.py.
            metainfo = data_samples['data_samples'].metainfo
            # The head expects batch_input_shape; fall back to img_shape.
            if 'batch_input_shape' not in metainfo:
                metainfo['batch_input_shape'] = metainfo['img_shape']
            batch_img_metas = [metainfo]

            # Query-head prediction, rescaled back to the original image size.
            results_list = self.model.predict_query_head(
                feat, batch_img_metas, rescale=True)

            # Emit deterministic, fixed-shape tensors (300 queries).
            bboxes = results_list[0].bboxes.reshape([1, 300, 4])
            labels = results_list[0].labels.reshape([1, 300])
            scores = results_list[0].scores.reshape([1, 300, 1])

            # Concatenate scores onto the boxes: [1, 300, 5].
            bboxes_with_scores = torch.cat((bboxes, scores), dim=2)

            return bboxes_with_scores, labels

    return CustomModel(model)

def run_consistent_preprocessing_inference():
    """Run the custom inference path with preprocessing identical to test.py.

    Loads the CO-DINO detector on CPU, pushes the target image through the
    standard test pipeline, runs the wrapped forward, and prints summary
    statistics along the way.

    Returns:
        tuple: ``(bboxes, labels, scores)`` numpy arrays of shapes
        [300, 4], [300] and [300].
    """
    config_file = '/disk2/xd/project/mmdetection/projects/CO-DETR/configs/codino/co_dino_5scale_r50_8xb2_1x_coco.py'
    checkpoint_file = '/disk2/xd/project/mmdetection/work_dirs/co_dino_5scale_r50_8xb2_1x_coco/epoch_12.pth'
    image_path = '/disk2/xd/project/mmdetection/data/coco/images/val/scene-1_000003.jpg'

    print("=== 一致预处理推理 ===")

    # Build the detector from config + checkpoint on CPU.
    cfg = Config.fromfile(config_file)
    detector = init_detector(cfg, checkpoint_file, device='cpu')

    # Push the target image through the standard test pipeline.
    pipeline = create_standard_test_pipeline()
    sample = pipeline(dict(img_path=image_path, img_id=0))

    print(f"标准预处理结果:")
    print(f"  输入形状: {sample['inputs'].shape}")
    print(f"  图像元信息: {sample['data_samples'].metainfo}")

    # Wrap the detector with the custom forward and switch to eval mode.
    wrapped = create_custom_forward_with_standard_preprocessing(detector)
    wrapped.eval()

    # Inference without gradient tracking.
    with torch.no_grad():
        bboxes_with_scores, labels = wrapped({
            'inputs': sample['inputs'],
            'data_samples': sample['data_samples'],
        })

    # Split boxes / scores / labels into numpy arrays.
    bboxes = bboxes_with_scores[0, :, :4].cpu().numpy()  # [300, 4]
    scores = bboxes_with_scores[0, :, 4].cpu().numpy()   # [300]
    labels = labels[0].cpu().numpy()  # [300]

    print(f"一致预处理推理结果:")
    print(f"  检测数量: {len(bboxes)}")
    print(f"  置信度范围: [{np.min(scores):.6f}, {np.max(scores):.6f}]")
    print(f"  平均置信度: {np.mean(scores):.6f}")

    return bboxes, labels, scores

def apply_nms_post_processing(bboxes, labels, scores, nms_threshold=0.5, max_per_img=300):
    """Apply class-aware greedy NMS post-processing to raw detections.

    Args:
        bboxes (np.ndarray): [N, 4] boxes as (x1, y1, x2, y2).
        labels (np.ndarray): [N] integer class labels.
        scores (np.ndarray): [N] confidence scores.
        nms_threshold (float): IoU threshold; a same-class box overlapping a
            higher-scoring kept box above this is suppressed.
        max_per_img (int): keep at most this many top-scoring boxes pre-NMS.

    Returns:
        tuple: filtered ``(bboxes, labels, scores)`` arrays (empty arrays
        when nothing survives).
    """
    if len(bboxes) == 0:
        return bboxes, labels, scores

    # Keep only the top max_per_img detections by confidence.
    if len(scores) > max_per_img:
        top_indices = np.argsort(scores)[::-1][:max_per_img]
        bboxes = bboxes[top_indices]
        labels = labels[top_indices]
        scores = scores[top_indices]

    def _nms(boxes, box_scores, box_labels, iou_threshold):
        """Greedy class-aware NMS; returns int indices of kept boxes."""
        if len(boxes) == 0:
            return np.array([], dtype=int)

        # Process boxes in descending score order.
        order = np.argsort(box_scores)[::-1]
        keep = []

        while order.size > 0:
            i = order[0]
            keep.append(i)

            if order.size == 1:
                break

            # Intersection of the current box with all remaining boxes.
            xx1 = np.maximum(boxes[i, 0], boxes[order[1:], 0])
            yy1 = np.maximum(boxes[i, 1], boxes[order[1:], 1])
            xx2 = np.minimum(boxes[i, 2], boxes[order[1:], 2])
            yy2 = np.minimum(boxes[i, 3], boxes[order[1:], 3])

            w = np.maximum(0.0, xx2 - xx1)
            h = np.maximum(0.0, yy2 - yy1)
            inter = w * h

            area1 = (boxes[i, 2] - boxes[i, 0]) * (boxes[i, 3] - boxes[i, 1])
            area2 = (boxes[order[1:], 2] - boxes[order[1:], 0]) * (boxes[order[1:], 3] - boxes[order[1:], 1])
            union = area1 + area2 - inter

            # BUGFIX: the original divided by union directly; zero-area
            # (degenerate) box pairs yield union == 0, producing NaN plus a
            # RuntimeWarning and silently dropping those boxes. Treat a zero
            # union as zero IoU instead.
            iou = np.where(union > 0, inter / np.maximum(union, 1e-12), 0.0)

            # Suppress only boxes of the same class (other classes -> IoU 0).
            same_class = (box_labels[order[1:]] == box_labels[i])
            iou = iou * same_class

            # Retain boxes whose IoU is at or below the threshold.
            inds = np.where(iou <= iou_threshold)[0]
            order = order[inds + 1]

        return np.array(keep, dtype=int)

    keep_indices = _nms(bboxes, scores, labels, nms_threshold)

    if len(keep_indices) > 0:
        return bboxes[keep_indices], labels[keep_indices], scores[keep_indices]
    return np.array([]), np.array([]), np.array([])

def visualize_comparison(image_path, standard_bboxes, standard_labels, standard_scores,
                        consistent_bboxes, consistent_labels, consistent_scores,
                        class_names, output_dir, confidence_threshold=0.3):
    """Render a three-panel comparison of the two detection result sets.

    Panel 1: standard test.py detections; panel 2: consistent-preprocessing
    detections; panel 3: matched (green), standard-only (red, dashed) and
    consistent-only (blue, dashed) boxes. Only detections with
    score >= confidence_threshold are drawn. A same-class pair counts as
    matched when its IoU exceeds 0.5.

    Returns:
        tuple: (matched_standard, matched_consistent,
                total_standard, total_consistent), all computed on the
        confidence-filtered detections.
    """
    img = cv2.imread(image_path)
    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    # Keep only detections at or above the confidence threshold.
    standard_mask = standard_scores >= confidence_threshold
    consistent_mask = consistent_scores >= confidence_threshold

    filtered_standard_bboxes = standard_bboxes[standard_mask]
    filtered_standard_labels = standard_labels[standard_mask]
    filtered_standard_scores = standard_scores[standard_mask]

    filtered_consistent_bboxes = consistent_bboxes[consistent_mask]
    filtered_consistent_labels = consistent_labels[consistent_mask]
    filtered_consistent_scores = consistent_scores[consistent_mask]

    fig, axes = plt.subplots(1, 3, figsize=(24, 8))
    colors = plt.cm.Set3(np.linspace(0, 1, len(class_names)))

    def _class_name(label):
        # Generic fallback for labels outside the known class list.
        return class_names[label] if label < len(class_names) else f"class_{label}"

    def _score_range(score_arr):
        # BUGFIX: the original called np.min/np.max unconditionally in the
        # panel titles, which raises on an empty array when no detection
        # passes the confidence filter. Emit a placeholder instead.
        if len(score_arr) == 0:
            return '[-, -]'
        return f'[{np.min(score_arr):.3f}, {np.max(score_arr):.3f}]'

    def _draw_detections(ax, boxes, box_labels, box_scores):
        # Draw every positive-area box with its class color and score tag.
        for bbox, label, score in zip(boxes, box_labels, box_scores):
            x1, y1, x2, y2 = bbox
            width = x2 - x1
            height = y2 - y1
            if width > 0 and height > 0:
                rect = patches.Rectangle((x1, y1), width, height,
                                         linewidth=2, edgecolor=colors[label % len(colors)],
                                         facecolor='none')
                ax.add_patch(rect)
                ax.text(x1, y1 - 5, f'{_class_name(label)}: {score:.3f}',
                        bbox=dict(boxstyle="round,pad=0.3", facecolor=colors[label % len(colors)], alpha=0.7),
                        fontsize=8, color='black')

    def _pair_iou(box_a, box_b):
        # Plain IoU between two (x1, y1, x2, y2) boxes; 0 for empty union.
        xx1 = max(box_a[0], box_b[0])
        yy1 = max(box_a[1], box_b[1])
        xx2 = min(box_a[2], box_b[2])
        yy2 = min(box_a[3], box_b[3])
        inter = max(0.0, xx2 - xx1) * max(0.0, yy2 - yy1)
        area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
        area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
        union = area_a + area_b - inter
        return inter / union if union > 0 else 0

    # Panel 1: standard test.py detections.
    axes[0].imshow(img_rgb)
    _draw_detections(axes[0], filtered_standard_bboxes, filtered_standard_labels, filtered_standard_scores)
    axes[0].set_title(f'Standard test.py (Conf≥{confidence_threshold})\n{len(filtered_standard_bboxes)} objects\nConf: {_score_range(filtered_standard_scores)}')
    axes[0].axis('off')

    # Panel 2: consistent-preprocessing detections.
    axes[1].imshow(img_rgb)
    _draw_detections(axes[1], filtered_consistent_bboxes, filtered_consistent_labels, filtered_consistent_scores)
    axes[1].set_title(f'Consistent Preprocessing (Conf≥{confidence_threshold})\n{len(filtered_consistent_bboxes)} objects\nConf: {_score_range(filtered_consistent_scores)}')
    axes[1].axis('off')

    # Panel 3: match analysis on the filtered detections.
    axes[2].imshow(img_rgb)

    matched_standard = set()
    matched_consistent = set()

    # Exhaustive same-class pairing; a pair with IoU > 0.5 is a match.
    for i, (std_bbox, std_label) in enumerate(zip(filtered_standard_bboxes, filtered_standard_labels)):
        for j, (con_bbox, con_label) in enumerate(zip(filtered_consistent_bboxes, filtered_consistent_labels)):
            if std_label != con_label:
                continue
            iou = _pair_iou(std_bbox, con_bbox)
            if iou > 0.5:  # IoU threshold for a match
                matched_standard.add(i)
                matched_consistent.add(j)

                # Draw the matched standard box in green with the IoU value.
                x1, y1, x2, y2 = std_bbox
                width = x2 - x1
                height = y2 - y1
                if width > 0 and height > 0:
                    rect = patches.Rectangle((x1, y1), width, height,
                                             linewidth=3, edgecolor='green',
                                             facecolor='none')
                    axes[2].add_patch(rect)
                    axes[2].text(x1, y1 - 5, f'{_class_name(std_label)}: {iou:.2f}',
                                 bbox=dict(boxstyle="round,pad=0.3", facecolor='green', alpha=0.7),
                                 fontsize=8, color='black')

    def _draw_unmatched(boxes, box_labels, matched, color):
        # Dashed outline for detections present in only one result set.
        for idx, (bbox, label) in enumerate(zip(boxes, box_labels)):
            if idx in matched:
                continue
            x1, y1, x2, y2 = bbox
            width = x2 - x1
            height = y2 - y1
            if width > 0 and height > 0:
                rect = patches.Rectangle((x1, y1), width, height,
                                         linewidth=2, edgecolor=color,
                                         facecolor='none', linestyle='--')
                axes[2].add_patch(rect)

    _draw_unmatched(filtered_standard_bboxes, filtered_standard_labels, matched_standard, 'red')
    _draw_unmatched(filtered_consistent_bboxes, filtered_consistent_labels, matched_consistent, 'blue')

    axes[2].set_title(f'Comparison (Conf≥{confidence_threshold})\nGreen: Matched\nRed: Standard only\nBlue: Consistent only')
    axes[2].axis('off')

    plt.tight_layout()
    vis_path = Path(output_dir) / "consistent_preprocessing_comparison.jpg"
    plt.savefig(str(vis_path), dpi=150, bbox_inches='tight')
    plt.close()

    print(f"比较结果已保存到: {vis_path}")

    # Match statistics on the confidence-filtered detections.
    return len(matched_standard), len(matched_consistent), len(filtered_standard_bboxes), len(filtered_consistent_bboxes)

def analyze_results(standard_bboxes, standard_labels, standard_scores,
                   consistent_bboxes, consistent_labels, consistent_scores,
                   class_names):
    """Print a side-by-side analysis of the two detection result sets:
    counts, confidence statistics, and per-class distributions."""
    print("\n=== 结果分析 ===")

    # Detection-count comparison.
    print(f"检测数量比较:")
    print(f"  标准test.py: {len(standard_bboxes)} 个")
    print(f"  一致预处理推理: {len(consistent_bboxes)} 个")
    print(f"  差异: {abs(len(standard_bboxes) - len(consistent_bboxes))} 个")

    # Confidence-distribution comparison.
    print(f"\n置信度分布比较:")
    print(f"  标准test.py: 范围[{np.min(standard_scores):.6f}, {np.max(standard_scores):.6f}], 平均{np.mean(standard_scores):.6f}")
    print(f"  一致预处理推理: 范围[{np.min(consistent_scores):.6f}, {np.max(consistent_scores):.6f}], 平均{np.mean(consistent_scores):.6f}")

    # Per-class distribution comparison.
    print(f"\n类别分布比较:")

    def _print_distribution(header, label_arr):
        # Print a "name: count" line per class present in label_arr.
        print(header)
        for label, count in zip(*np.unique(label_arr, return_counts=True)):
            name = class_names[label] if label < len(class_names) else f"class_{label}"
            print(f"    {name}: {count} 个")

    _print_distribution(f"  标准test.py类别分布:", standard_labels)
    _print_distribution(f"  一致预处理推理类别分布:", consistent_labels)

def main():
    """Entry point: run consistent-preprocessing inference, compare it with
    the standard test.py results, and save the visualization + JSON report."""
    # Fixed input image and output directory.
    image_path = '/disk2/xd/project/mmdetection/data/coco/images/val/scene-1_000003.jpg'
    output_dir = '/disk2/xd/project/mmdetection/results'

    # BUGFIX: also create missing parent directories; a plain
    # mkdir(exist_ok=True) raises FileNotFoundError when the parent is absent.
    Path(output_dir).mkdir(parents=True, exist_ok=True)

    # Dataset class names (custom 13-class setup).
    class_names = [
        'car', 'truck', 'pedestrian', 'traffic_cone', 'forklift',
        'tractor', 'flatbed', 'trailer', 'cyclist', 'bicycle',
        'water_horse', 'guardrail', 'unknown'
    ]

    def _score_stats(score_arr):
        # Safe (min, max, mean) for possibly-empty score arrays.
        # BUGFIX: the original called np.min/np.max/np.mean unconditionally,
        # which raises on an empty result set.
        if len(score_arr) == 0:
            return 0.0, 0.0, 0.0
        return float(np.min(score_arr)), float(np.max(score_arr)), float(np.mean(score_arr))

    # Run inference with preprocessing identical to the standard test.py.
    consistent_bboxes, consistent_labels, consistent_scores = run_consistent_preprocessing_inference()

    # Post-process with class-aware NMS.
    filtered_bboxes, filtered_labels, filtered_scores = apply_nms_post_processing(
        consistent_bboxes, consistent_labels, consistent_scores, nms_threshold=0.5, max_per_img=300
    )

    f_min, f_max, f_mean = _score_stats(filtered_scores)
    print(f"NMS后处理结果:")
    print(f"  检测数量: {len(filtered_bboxes)}")
    print(f"  置信度范围: [{f_min:.6f}, {f_max:.6f}]")
    print(f"  平均置信度: {f_mean:.6f}")

    # Load the reference results produced by the standard test.py run.
    standard_result = load_standard_test_results()
    if standard_result is None:
        print("无法加载标准test.py结果")
        return

    pred_instances = standard_result['pred_instances']
    standard_bboxes = pred_instances['bboxes']
    standard_labels = pred_instances['labels']
    standard_scores = pred_instances['scores']

    # Move torch tensors to CPU numpy when needed.
    if hasattr(standard_bboxes, 'cpu'):
        standard_bboxes = standard_bboxes.cpu().numpy()
    if hasattr(standard_labels, 'cpu'):
        standard_labels = standard_labels.cpu().numpy()
    if hasattr(standard_scores, 'cpu'):
        standard_scores = standard_scores.cpu().numpy()

    s_min, s_max, s_mean = _score_stats(standard_scores)
    print(f"\n标准test.py结果:")
    print(f"  检测数量: {len(standard_bboxes)}")
    print(f"  置信度范围: [{s_min:.6f}, {s_max:.6f}]")
    print(f"  平均置信度: {s_mean:.6f}")

    # Print the comparative analysis.
    analyze_results(standard_bboxes, standard_labels, standard_scores,
                   filtered_bboxes, filtered_labels, filtered_scores,
                   class_names)

    # Visual comparison (only detections with confidence >= 0.3 are drawn).
    matched_std, matched_con, total_std, total_con = visualize_comparison(
        image_path, standard_bboxes, standard_labels, standard_scores,
        filtered_bboxes, filtered_labels, filtered_scores,
        class_names, output_dir, confidence_threshold=0.3
    )

    # Aggregate the analysis for the JSON report.
    analysis_results = {
        'standard_test': {
            'count': len(standard_bboxes),
            'confidence_range': [s_min, s_max],
            'average_confidence': s_mean,
            'class_distribution': {}
        },
        'consistent_preprocessing': {
            'count': len(filtered_bboxes),
            'confidence_range': [f_min, f_max],
            'average_confidence': f_mean,
            'class_distribution': {}
        },
        'matching_analysis': {
            'matched_standard': matched_std,
            'matched_consistent': matched_con,
            'total_standard': total_std,
            'total_consistent': total_con,
            'matching_rate_standard': float(matched_std / total_std) if total_std > 0 else 0.0,
            'matching_rate_consistent': float(matched_con / total_con) if total_con > 0 else 0.0
        }
    }

    # Per-class detection counts for both result sets.
    unique_labels, counts = np.unique(standard_labels, return_counts=True)
    for label, count in zip(unique_labels, counts):
        class_name = class_names[label] if label < len(class_names) else f"class_{label}"
        analysis_results['standard_test']['class_distribution'][class_name] = int(count)

    unique_labels, counts = np.unique(filtered_labels, return_counts=True)
    for label, count in zip(unique_labels, counts):
        class_name = class_names[label] if label < len(class_names) else f"class_{label}"
        analysis_results['consistent_preprocessing']['class_distribution'][class_name] = int(count)

    json_path = Path(output_dir) / "consistent_preprocessing_analysis_results.json"
    with open(json_path, 'w', encoding='utf-8') as f:
        json.dump(analysis_results, f, indent=2, ensure_ascii=False)

    # High-confidence (>= 0.3) detection counts.
    standard_high_conf_count = np.sum(standard_scores >= 0.3)
    consistent_high_conf_count = np.sum(filtered_scores >= 0.3)

    print(f"\n置信度≥0.3的检测统计:")
    print(f"  标准test.py: {standard_high_conf_count} 个")
    print(f"  一致预处理推理: {consistent_high_conf_count} 个")

    print(f"\n分析结果已保存到JSON: {json_path}")
    print(f"匹配分析 (置信度≥0.3):")
    # BUGFIX: guard the percentage computation; the original divided by the
    # totals directly and crashed with ZeroDivisionError when no detection
    # passed the confidence filter.
    std_rate = matched_std / total_std * 100 if total_std > 0 else 0.0
    con_rate = matched_con / total_con * 100 if total_con > 0 else 0.0
    print(f"  标准test.py匹配率: {matched_std}/{total_std} ({std_rate:.1f}%)")
    print(f"  一致预处理推理匹配率: {matched_con}/{total_con} ({con_rate:.1f}%)")

    print("\n一致预处理推理完成!")

# Run the comparison pipeline only when executed as a script.
if __name__ == "__main__":
    main() 