import torch
import cv2
import numpy as np
import matplotlib.pyplot as plt
import os
import glob  # Import glob for finding files
from mmengine.config import Config
from mmengine.registry import init_default_scope
from mmengine.dataset import default_collate

# MMCV and MMDetection imports
from mmcv.transforms import LoadImageFromFile, Compose
from mmdet.apis import init_detector
from mmdet.datasets.transforms import Resize, PackDetInputs
from mmdet.structures.bbox import HorizontalBoxes  # imported to ensure the bbox type is registered

# --- 1. Configuration parameters ---
# --- Config file paths (edit to match your environment) ---
config_fpn_path = '/media/ross/8TB/project/lsh/deep_learning/DiffusionDet_mmdet/DiffusionDet/SOTA/work_dirs/diffusiondet_r50_fpn_epoch_microalgeaOri/diffusiondet_r50_fpn_epoch_microalgeaOri.py'
config_pafpn_path = '/media/ross/8TB/project/lsh/deep_learning/LC_CenterNet/work_dirs/diffusiondet_r50_bifpn_500-proposals_1-step_crop-ms-480-800-450k_epoch_microalgea/diffusiondet_r50_bifpn_500-proposals_1-step_crop-ms-480-800-450k_epoch_microalgea.py'
config_lamfpn_path = '/media/ross/8TB/project/lsh/deep_learning/DiffusionDet_mmdet/DiffusionDet/work_dirs/ablation/config/diffusiondet_r50_lamfpn8_epoch_microalgeaOri_0lcm2_0adem2_0ddim4_0distill4_memeryOptim/diffusiondet_r50_lamfpn8_epoch_microalgeaOri_0lcm2_0adem2_0ddim4_0distill4_memeryOptim.py'

# --- Checkpoint file paths ---
# NOTE: all models are assumed to share the same checkpoint here. If they
# differ, specify each model's own checkpoint path when loading.
checkpoint_path_common = '/media/ross/8TB/project/lsh/deep_learning/DiffusionDet_mmdet/DiffusionDet/work_dirs/ablation/config/diffusiondet_r50_lamfpn8_epoch_microalgeaOri_0lcm2_0adem2_0ddim4_0distill4_memeryOptim/best_coco_bbox_mAP_epoch_44.pth'
checkpoint_pafpn_path = '/media/ross/8TB/project/lsh/deep_learning/LC_CenterNet/work_dirs/diffusiondet_r50_bifpn_500-proposals_1-step_crop-ms-480-800-450k_epoch_microalgea/epoch_100.pth'

# --- Input folder path ---
input_folder = '/media/ross/8TB/project/lsh/deep_learning/DiffusionDet_mmdet/DiffusionDet/tools/mymodel_analysis_tools/figures_training/images'  # <--- folder containing the images to process
# Output directory for the saved visualizations
output_dir = './feature_map_visualizations/'
os.makedirs(output_dir, exist_ok=True)
# Supported image file extensions (glob patterns)
image_extensions = ['*.jpg', '*.jpeg', '*.png', '*.bmp', '*.tif', '*.tiff']
# Run on GPU if available, otherwise fall back to CPU.
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'


# --- Helper function: feature-map processing for visualization ---
def process_feature_map(feature_map_tensor, target_size):
    """Reduce a feature map to a single normalized 2-D heatmap for display.

    Args:
        feature_map_tensor (torch.Tensor): Feature map of shape
            ``(1, C, H, W)``, ``(C, H, W)`` or ``(H, W)``. A 4-D tensor with
            batch size > 1 falls back to its first item.
        target_size (tuple[int, int]): Output size as ``(width, height)`` --
            the argument order expected by ``cv2.resize``.

    Returns:
        np.ndarray: 2-D array resized to ``target_size`` with values
        min-max normalized to approximately [0, 1].

    Raises:
        ValueError: If the tensor cannot be reduced to a 2-D or 3-D map.
    """
    if feature_map_tensor.dim() == 4 and feature_map_tensor.size(0) == 1:
        # Drop the singleton batch dimension: (1, C, H, W) -> (C, H, W)
        feat = feature_map_tensor.squeeze(0)
    elif feature_map_tensor.dim() in (2, 3):
        # Already (C, H, W), or (H, W) with channels pre-reduced.
        feat = feature_map_tensor
    else:
        print(f"Warning: Unexpected feature map dimension: {feature_map_tensor.shape}. Attempting to process.")
        # Take the first item along the leading dim (e.g. batch > 1), then
        # verify we ended up with something displayable.
        if feature_map_tensor.dim() > 3:
            feat = feature_map_tensor[0]
        else:
            feat = feature_map_tensor
        if feat.dim() != 3:
            raise ValueError(f"Cannot process feature map with shape {feature_map_tensor.shape} to 3D")
    # Collapse the channel dimension by averaging (skipped for 2-D input).
    if feat.dim() == 3:
        feat = feat.mean(dim=0)
    # Min-max normalize; the epsilon guards against a constant map (max == min).
    feat = (feat - feat.min()) / (feat.max() - feat.min() + 1e-6)
    feat_np = feat.cpu().numpy()
    # cv2.resize takes dsize in (width, height) order.
    feat_resized = cv2.resize(feat_np, target_size, interpolation=cv2.INTER_LINEAR)
    return feat_resized


# --- 2. Model configurations to load ---
# Visualization order: FPN, PAFPN, LAMFPN (PAFPN currently disabled).
_model_specs = [
    ('FPN', config_fpn_path, checkpoint_path_common),
    # ('PAFPN', config_pafpn_path, checkpoint_pafpn_path),
    ('LAMFPN', config_lamfpn_path, checkpoint_path_common),
]
model_configs_to_load = [
    {'name': spec_name, 'config': spec_config, 'ckpt': spec_ckpt}
    for spec_name, spec_config, spec_ckpt in _model_specs
]

# --- 3. Load the models ---
models = {}   # name -> initialized detector
configs = {}  # name -> parsed mmengine Config
print("Loading models...")
for model_info in model_configs_to_load:
    name = model_info['name']
    config_path = model_info['config']
    ckpt_path = model_info['ckpt']  # use the checkpoint specified for this model

    if not os.path.exists(config_path):
        print(f"Warning: Config file not found for {name}: {config_path}. Skipping.")
        continue
    if not os.path.exists(ckpt_path):
        print(
            f"Warning: Checkpoint file not found for {name}: {ckpt_path}. Using this config might fail or use random weights if model init doesn't require ckpt.")
        # Decide how to handle missing checkpoint: skip model, load without weights, etc.
        # For now, we proceed, but init_detector might fail later if ckpt is crucial.

    print(f"  Loading {name} model from {config_path}...")
    try:
        cfg = Config.fromfile(config_path)
        configs[name] = cfg
        # --- Initialize the default registry scope (must happen before model init) ---
        init_default_scope(cfg.get('default_scope', 'mmdet'))
        # --- Initialize the model ---
        # Note: init_detector might load weights specified *inside* the config if ckpt_path is None
        model = init_detector(cfg, ckpt_path, device=device)
        model.eval()
        models[name] = model
    except Exception as e:
        print(f"Error loading model {name}: {e}")

if not models:
    print("Error: No models were loaded successfully. Exiting.")
    exit()
print("Models loaded:", list(models.keys()))

# --- 4. Build the data-processing pipeline (assumed identical for all models) ---
# Use the config of the first successfully loaded model to build the pipeline.
# (If different models used different pipelines, more elaborate handling would be needed.)
first_loaded_model_name = list(models.keys())[0]
cfg_for_pipeline = configs[first_loaded_model_name]
# Prefer the test_pipeline defined in the config; otherwise fall back to a default.
if hasattr(cfg_for_pipeline, 'test_pipeline'):
    test_pipeline_cfg = cfg_for_pipeline.test_pipeline
    # MMDetection 3.x usually exposes this as a plain list; older versions may
    # nest it under dataset/pipeline.
    # Safely access potential nested structure if needed, this example assumes direct list
else:
    # Fallback pipeline if not defined in config (adjust as needed)
    print("Warning: test_pipeline not found in config, using default.")
    test_pipeline_cfg = [
        dict(type='LoadImageFromFile', backend_args=None, imdecode_backend='pillow'),
        dict(type='Resize', scale=(1333, 800), keep_ratio=True, backend='pillow'),
        dict(type='PackDetInputs', meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'scale_factor'))
    ]

# Re-initialize the default scope right before Compose, just in case it changed.
init_default_scope(cfg_for_pipeline.get('default_scope', 'mmdet'))
test_pipeline = Compose(test_pipeline_cfg)
print("Data pipeline created.")

# --- 5. Find and process every image in the input folder ---
image_files = []
for ext in image_extensions:
    image_files.extend(glob.glob(os.path.join(input_folder, ext)))

if not image_files:
    print(f"Error: No images found in folder: {input_folder}")
else:
    print(f"Found {len(image_files)} images to process.")

    for img_path in image_files:
        print(f"\n--- Processing image: {os.path.basename(img_path)} ---")
        try:
            # --- 5.1 Load the current image ---
            original_img_bgr = cv2.imread(img_path)
            if original_img_bgr is None:
                print(f"Warning: Could not read image {img_path}. Skipping.")
                continue
            original_img_rgb = cv2.cvtColor(original_img_bgr, cv2.COLOR_BGR2RGB)
            h, w = original_img_bgr.shape[:2]
            # (width, height) order, as expected by cv2.resize in process_feature_map.
            target_vis_size = (w, h)

            # --- 5.2 Prepare the input data ---
            data_input = test_pipeline(dict(img_path=img_path))
            data_batch = default_collate([data_input])

            # --- 5.3 Extract feature maps (backbone -> neck) per model ---
            feature_maps = {}
            neck_output_levels = {}
            max_levels = 0
            with torch.no_grad():
                # Iterate through the *loaded* models
                for neck_type, model in models.items():
                    print(f"  Extracting features using {neck_type}...")
                    try:
                        data = model.data_preprocessor(data_batch, False)
                        if 'inputs' not in data or data['inputs'] is None or data['inputs'].numel() == 0:
                            print(f"Error: No valid 'inputs' found after data preprocessing for {neck_type}.")
                            continue

                        x = model.backbone(data['inputs'])
                        neck_outputs = model.neck(x)
                        feature_maps[neck_type] = neck_outputs
                        num_levels = len(neck_outputs)
                        neck_output_levels[neck_type] = num_levels
                        max_levels = max(max_levels, num_levels)
                        print(
                            f"    Extracted {num_levels} feature map levels (P{2} to P{2 + num_levels - 1}) for {neck_type}.")
                    except Exception as e:
                        print(f"Error during feature extraction for {neck_type}: {e}")
                        # Ensure this model doesn't contribute potentially partial results
                        if neck_type in feature_maps: del feature_maps[neck_type]
                        if neck_type in neck_output_levels: del neck_output_levels[neck_type]

            # --- 5.4 Visualization ---
            if not feature_maps:
                print("  No feature maps were extracted successfully for this image. Skipping visualization.")
                continue

            # Grid size: one row for the original image plus one row per model.
            num_rows = len(models) + 1  # Original + one row per model
            num_cols = max_levels + 1  # Original Image + P2 + P3 + ...

            plt.figure(figsize=(num_cols * 4, num_rows * 4))  # Adjusted figsize might be needed
            plt.suptitle(f"Feature Maps Comparison: {os.path.basename(img_path)}", fontsize=16)

            # --- Row 1: Original Image ---
            plt.subplot(num_rows, num_cols, 1)
            plt.imshow(original_img_rgb)
            plt.title('Original Image')
            plt.axis('off')

            # --- Dynamically Plot Rows for each loaded model ---
            plot_row_idx = 1  # Start plotting models from the second row

            # Plot in the same order the models were loaded.
            for neck_type in models.keys():
                if neck_type not in feature_maps:  # Skip if feature extraction failed
                    print(f"Skipping plotting for {neck_type} due to previous errors.")
                    # Optionally add an empty row placeholder here if needed for alignment
                    plot_row_idx += 1
                    continue

                current_feats = feature_maps[neck_type]
                num_current_levels = neck_output_levels[neck_type]

                for i in range(max_levels):  # Iterate up to max_levels for consistent columns
                    # Subplot index = row * cols + column (columns 2..num_cols hold P-levels).
                    subplot_idx = plot_row_idx * num_cols + (i + 2)  # Calculate subplot index
                    plt.subplot(num_rows, num_cols, subplot_idx)
                    level_p_num = i + 2  # Calculate P-level number (P2, P3, ...)

                    if i < num_current_levels:  # Check if this level exists for the current model
                        try:
                            processed_feat = process_feature_map(current_feats[i].detach(), target_vis_size)
                            plt.imshow(processed_feat, cmap='viridis')
                            plt.title(f'{neck_type} - P{level_p_num}')
                        except Exception as e:
                            print(f"Error processing/plotting {neck_type} P{level_p_num}: {e}")
                            plt.title(f'{neck_type} - P{level_p_num} (Error)')
                    else:
                        plt.title(f'{neck_type} - P{level_p_num} (N/A)')  # Placeholder if model has fewer levels
                    plt.axis('off')

                plot_row_idx += 1  # Move to the next row for the next model

            # --- Final Touches ---
            plt.tight_layout(rect=[0, 0.03, 1, 0.95])
            base_filename = os.path.splitext(os.path.basename(img_path))[0]
            # Make filename reflect which models were included
            included_models_str = "_".join(sorted(feature_maps.keys()))  # e.g. FPN_LAMFPN_PAFPN
            output_filename = os.path.join(output_dir, f"neck_outputs_{included_models_str}_{base_filename}.png")
            plt.savefig(output_filename, dpi=300, bbox_inches='tight')
            print(f"  Neck output visualization saved to: {output_filename}")
            plt.close()  # Close the current figure

        except Exception as e:
            print(f"Error processing image {img_path}: {e}")
            plt.close()  # Ensure plot is closed on error
            continue

print("\n--- Processing complete. ---")
