from pathlib import Path
from functools import partial
import argparse
import yaml
import json
from collections import defaultdict
from tqdm import tqdm
import warnings
warnings.filterwarnings('ignore')

import numpy as np
import torch
import torch.nn.functional as F
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from sklearn.metrics import jaccard_score, f1_score

from model.myunet import HighResolutionModel
from model.aynet import AYNet
from model.losses import ComprehensiveLoss
from model.metrics import tensor_accessment
from utils.sensors import preprocess_sensor_data
from data.pa_us_dataset import PaUsDataset
from utils.plots import plot_images


class DatasetAnalyzer:
    """Dataset analyzer: evaluates a trained model over an entire dataset.

    Runs the model sample-by-sample (or batched), collects reconstruction
    metrics (loss components, PSNR, SSIM), optional segmentation metrics and
    the background/foreground pixel ratio of each target image, then exports
    the results as JSON/CSV/markdown reports and diagnostic plots.
    """

    def __init__(self, model, loss_fn, device='cpu', bg_threshold=0.1, data_range=2.0, has_segmentation=False):
        """
        Args:
            model: trained model, invoked as ``model(sensor_data, bfimg)``.
            loss_fn: loss callable returning a dict with at least key ``'loss'``.
            device: device string used for inference (e.g. ``'cpu'``, ``'cuda'``).
            bg_threshold: intensity threshold (after 0-1 normalization) that
                separates background from foreground pixels.
            data_range: dynamic range forwarded to the PSNR/SSIM computation.
            has_segmentation: whether the model is expected to also produce a
                segmentation output.
        """
        self.model = model
        self.loss_fn = loss_fn
        self.device = device
        self.bg_threshold = bg_threshold
        self.data_range = data_range
        self.has_segmentation = has_segmentation
        self.model.eval()  # switch to evaluation mode for deterministic inference

        # Accumulated analysis state.
        self.results = []                         # one dict per analyzed sample
        self.bg_fg_ratios = []                    # kept for external consumers
        self.metrics_summary = defaultdict(list)

    def calculate_bg_fg_ratio(self, image_tensor):
        """Return the background/foreground pixel ratio of an image.

        The image is min-max normalized to [0, 1]; pixels at or below
        ``self.bg_threshold`` count as background.  Returns ``inf`` when the
        image contains no foreground pixels at all.
        """
        # Min-max normalize to 0-1 (epsilon guards against a constant image).
        normalized = (image_tensor - image_tensor.min()) / (image_tensor.max() - image_tensor.min() + 1e-8)

        # Threshold into background / foreground masks.
        background_mask = normalized <= self.bg_threshold
        foreground_mask = normalized > self.bg_threshold

        bg_pixels = background_mask.sum().float()
        fg_pixels = foreground_mask.sum().float()

        # Avoid division by zero when there is no foreground.
        if fg_pixels > 0:
            ratio = bg_pixels / fg_pixels
        else:
            ratio = torch.tensor(float('inf'))

        return ratio.item()

    def calculate_segmentation_metrics(self, pred_seg, target_seg, threshold=0.5):
        """Compute binary segmentation metrics between prediction and target.

        Both inputs are thresholded at ``threshold`` to binary masks; returns
        a dict with IoU, Dice, pixel accuracy, sensitivity and specificity.
        """
        # Binarize prediction and target.
        pred_binary = (pred_seg > threshold).float()
        target_binary = (target_seg > threshold).float()

        # Flatten to numpy for the sklearn metrics.
        pred_np = pred_binary.cpu().numpy().flatten()
        target_np = target_binary.cpu().numpy().flatten()

        # IoU (Jaccard index).
        iou = jaccard_score(target_np, pred_np, average='binary', zero_division=0)

        # Dice coefficient (equivalent to binary F1-score).
        dice = f1_score(target_np, pred_np, average='binary', zero_division=0)

        # Pixel-wise accuracy.
        pixel_acc = (pred_np == target_np).mean()

        # Sensitivity / specificity from the confusion-matrix counts.
        tp = ((pred_np == 1) & (target_np == 1)).sum()
        tn = ((pred_np == 0) & (target_np == 0)).sum()
        fp = ((pred_np == 1) & (target_np == 0)).sum()
        fn = ((pred_np == 0) & (target_np == 1)).sum()

        sensitivity = tp / (tp + fn) if (tp + fn) > 0 else 0
        specificity = tn / (tn + fp) if (tn + fp) > 0 else 0

        return {
            'iou': iou,
            'dice': dice,
            'pixel_accuracy': pixel_acc,
            'sensitivity': sensitivity,
            'specificity': specificity
        }

    def analyze_sample(self, sample_idx, sensor_data, target, bfimg, file_info=None):
        """Analyze a single sample; returns a metrics dict or None on failure.

        Args:
            sample_idx: index of the sample within the dataset.
            sensor_data, target, bfimg: input tensors (3-D tensors get a batch
                dimension added automatically).
            file_info: optional dict describing the source file of the sample.
        """
        with torch.no_grad():
            # Add a batch dimension where needed.
            if sensor_data.dim() == 3:
                sensor_data = sensor_data.unsqueeze(0)
            if target.dim() == 3:
                target = target.unsqueeze(0)
            if bfimg.dim() == 3:
                bfimg = bfimg.unsqueeze(0)

            # Move everything to the inference device.
            sensor_data = sensor_data.to(self.device)
            target = target.to(self.device)
            bfimg = bfimg.to(self.device)

            # Run the model.
            try:
                model_output = self.model(sensor_data, bfimg)

                # Handle the different model output conventions.
                if isinstance(model_output, tuple):
                    # Model returns (reconstruction, segmentation mask).
                    prediction, seg_prediction = model_output
                    has_segmentation_output = True
                elif hasattr(self.model, 'segmentation_head') and self.has_segmentation:
                    # Model has a segmentation head but only returned the
                    # reconstruction; try to obtain the segmentation separately
                    # (may need adjusting for a specific architecture).
                    prediction = model_output
                    try:
                        seg_prediction = self.model.segmentation_head(self.model.get_features(sensor_data, bfimg))
                        has_segmentation_output = True
                    except Exception:
                        # Best-effort: fall back to reconstruction-only analysis.
                        has_segmentation_output = False
                        seg_prediction = None
                else:
                    prediction = model_output
                    has_segmentation_output = False
                    seg_prediction = None

                # Validate the prediction before computing metrics.
                if torch.isnan(prediction).any() or torch.isinf(prediction).any():
                    raise ValueError("预测结果包含NaN或Inf")

                # Reconstruction loss (dict of components).
                loss_dict = self.loss_fn(prediction, target)

                # PSNR / SSIM.
                pred_np = prediction.detach().cpu().numpy()
                target_np = target.cpu().numpy()

                mpsnr, mssim, _, _ = tensor_accessment(
                    x_pred=pred_np,
                    x_true=target_np,
                    data_range=self.data_range,
                    multi_dimension=False
                )

                # Background/foreground ratio of the target image.
                bg_fg_ratio = self.calculate_bg_fg_ratio(target)

                # Collect the per-sample record.
                result = {
                    'sample_idx': sample_idx,
                    'file_info': file_info,
                    'bg_fg_ratio': bg_fg_ratio,
                    'total_loss': loss_dict['loss'].item(),
                    'perceptual_loss': loss_dict.get('perceptual_loss', torch.tensor(0.0)).item(),
                    'ms_ssim_loss': loss_dict.get('ms_ssim_loss', torch.tensor(0.0)).item(),
                    'l1_loss': loss_dict.get('l1_loss', torch.tensor(0.0)).item(),
                    'l2_loss': loss_dict.get('l2_loss', torch.tensor(0.0)).item(),
                    'mpsnr': mpsnr,
                    'mssim': mssim,
                    'prediction_mean': prediction.mean().item(),
                    'prediction_std': prediction.std().item(),
                    'target_mean': target.mean().item(),
                    'target_std': target.std().item()
                }

                # Segmentation metrics, if a segmentation output exists.
                if has_segmentation_output and seg_prediction is not None:
                    # NOTE(review): the target image is used as a proxy
                    # segmentation ground truth via thresholding — confirm a
                    # dedicated segmentation label is not available.
                    target_seg = (target > self.bg_threshold).float()

                    seg_metrics = self.calculate_segmentation_metrics(seg_prediction, target_seg)
                    result.update({
                        'has_segmentation': True,
                        'seg_iou': seg_metrics['iou'],
                        'seg_dice': seg_metrics['dice'],
                        'seg_pixel_accuracy': seg_metrics['pixel_accuracy'],
                        'seg_sensitivity': seg_metrics['sensitivity'],
                        'seg_specificity': seg_metrics['specificity']
                    })
                else:
                    result.update({
                        'has_segmentation': False,
                        'seg_iou': 0.0,
                        'seg_dice': 0.0,
                        'seg_pixel_accuracy': 0.0,
                        'seg_sensitivity': 0.0,
                        'seg_specificity': 0.0
                    })

                return result

            except Exception as e:
                print(f"Error processing sample {sample_idx}: {str(e)}")
                return None

    def analyze_dataset(self, dataset, max_samples=None, batch_size=1):
        """Analyze the whole dataset; fills and returns ``self.results``.

        Args:
            dataset: indexable dataset yielding (sensor_data, target, bfimg).
            max_samples: optional cap on the number of samples analyzed.
            batch_size: >1 routes loading through a DataLoader.
        """
        dataset_size = len(dataset)
        if max_samples is not None:
            dataset_size = min(dataset_size, max_samples)

        print(f"开始分析数据集，共 {dataset_size} 个样本...")

        self.results = []

        # Batched path: use a DataLoader for I/O, still analyze per sample.
        if batch_size > 1:
            from torch.utils.data import DataLoader
            dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False)

            sample_idx = 0
            for batch in tqdm(dataloader, desc="分析进度"):
                if max_samples is not None and sample_idx >= max_samples:
                    break

                sensor_data, target, bfimg = batch
                batch_size_actual = sensor_data.size(0)

                for i in range(batch_size_actual):
                    if max_samples is not None and sample_idx >= max_samples:
                        break

                    # Attach file info when the dataset can provide it.
                    file_info = None
                    if hasattr(dataset, 'get_file_info'):
                        file_info = dataset.get_file_info(sample_idx)

                    result = self.analyze_sample(
                        sample_idx,
                        sensor_data[i],
                        target[i],
                        bfimg[i],
                        file_info=file_info
                    )

                    if result is not None:
                        self.results.append(result)

                    sample_idx += 1
        else:
            # Per-sample path.
            for i in tqdm(range(dataset_size), desc="分析进度"):
                sensor_data, target, bfimg = dataset[i]

                # Attach file info when the dataset can provide it.
                file_info = None
                if hasattr(dataset, 'get_file_info'):
                    file_info = dataset.get_file_info(i)
                elif hasattr(dataset, 'names') and i < len(dataset.names):
                    # Fall back to the dataset's `names` attribute.
                    file_info = {
                        'image_path': str(dataset.names[i]),
                        'sample_name': Path(dataset.names[i]).stem
                    }

                result = self.analyze_sample(i, sensor_data, target, bfimg, file_info=file_info)

                if result is not None:
                    self.results.append(result)

        print(f"分析完成，成功处理 {len(self.results)} 个样本")
        return self.results

    def _convert_to_json_serializable(self, obj):
        """Recursively convert numpy scalars/arrays to JSON-compatible types."""
        if isinstance(obj, dict):
            return {k: self._convert_to_json_serializable(v) for k, v in obj.items()}
        elif isinstance(obj, list):
            return [self._convert_to_json_serializable(item) for item in obj]
        elif isinstance(obj, np.integer):
            # Keep integers as integers (float() coercion lost the int type).
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        elif isinstance(obj, (bool, np.bool_)):
            return bool(obj)
        elif obj is None:
            return None
        elif isinstance(obj, (int, float, str)):
            return obj
        else:
            # Fallback for anything else (e.g. Path objects): stringify.
            return str(obj)

    def save_results(self, output_dir, save_problematic_images=True):
        """Save analysis artifacts (JSON results, statistics, CSV, report).

        NOTE(review): ``save_problematic_images`` is currently unused; kept
        for interface compatibility.
        """
        output_dir = Path(output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

        # Raw per-sample results, converted to a JSON-compatible structure.
        results_file = output_dir / 'analysis_results.json'
        json_compatible_results = self._convert_to_json_serializable(self.results)
        with open(results_file, 'w', encoding='utf-8') as f:
            json.dump(json_compatible_results, f, indent=2, ensure_ascii=False)

        # Aggregate statistics.
        stats = self.generate_statistics()
        if stats:
            stats_file = output_dir / 'statistics.json'
            json_compatible_stats = self._convert_to_json_serializable(stats)
            with open(stats_file, 'w', encoding='utf-8') as f:
                json.dump(json_compatible_stats, f, indent=2, ensure_ascii=False)

        # Problematic samples + report + mapping (only when any were found).
        problematic_samples = self.identify_problematic_samples()
        if len(problematic_samples) > 0:
            problematic_file = output_dir / 'problematic_samples.csv'
            problematic_samples.to_csv(problematic_file, index=False, encoding='utf-8')

            # Markdown summary report.
            self._save_markdown_report(output_dir, stats, problematic_samples)

            # Sample-index -> file-path mapping.
            self._save_sample_mapping(output_dir)

        return output_dir

    def _save_sample_mapping(self, output_dir):
        """Save the mapping from sample index to source file information."""
        mapping_file = output_dir / 'sample_mapping.json'

        sample_mapping = {}
        for result in self.results:
            sample_idx = result['sample_idx']
            file_info = result.get('file_info')

            if file_info:
                sample_mapping[sample_idx] = file_info
            else:
                sample_mapping[sample_idx] = {
                    'image_path': f"Unknown (sample_{sample_idx})",
                    'sample_name': f"sample_{sample_idx}"
                }

        with open(mapping_file, 'w', encoding='utf-8') as f:
            json.dump(sample_mapping, f, indent=2, ensure_ascii=False)

        print(f"样本映射文件已保存: {mapping_file}")

    def _save_markdown_report(self, output_dir, stats, problematic_samples):
        """Write the markdown analysis report."""
        report_file = output_dir / 'analysis_report.md'

        with open(report_file, 'w', encoding='utf-8') as f:
            f.write("# 数据集分析报告\n\n")
            f.write("## 总体统计\n\n")
            f.write(f"- **总样本数**: {stats['total_samples']}\n")
            f.write(f"- **问题样本数**: {len(problematic_samples)}\n")
            f.write(f"- **问题样本比例**: {len(problematic_samples)/stats['total_samples']*100:.2f}%\n\n")

            f.write("## 背景/前景比例统计\n\n")
            bg_stats = stats['bg_fg_ratio_stats']
            f.write(f"- **均值**: {bg_stats['mean']:.3f}\n")
            f.write(f"- **标准差**: {bg_stats['std']:.3f}\n")
            f.write(f"- **中位数**: {bg_stats['median']:.3f}\n")
            f.write(f"- **最小值**: {bg_stats['min']:.3f}\n")
            f.write(f"- **最大值**: {bg_stats['max']:.3f}\n")
            f.write(f"- **25%分位数**: {bg_stats['q25']:.3f}\n")
            f.write(f"- **75%分位数**: {bg_stats['q75']:.3f}\n\n")

            f.write("## 模型性能指标统计\n\n")
            f.write("| 指标 | 均值 | 标准差 | 中位数 | 最小值 | 最大值 |\n")
            f.write("|------|------|--------|--------|--------|--------|\n")

            for metric, metric_stats in stats['metrics_stats'].items():
                f.write(f"| {metric} | {metric_stats['mean']:.4f} | {metric_stats['std']:.4f} | "
                       f"{metric_stats['median']:.4f} | {metric_stats['min']:.4f} | {metric_stats['max']:.4f} |\n")

            # Dominant loss-component breakdown over the problematic samples.
            if len(problematic_samples) > 0 and 'dominant_loss' in problematic_samples.columns:
                f.write("\n## 主导损失组件分析\n\n")
                dominant_loss_counts = problematic_samples['dominant_loss'].value_counts()
                f.write("在问题样本中，各损失组件的主导频次：\n\n")
                for loss_type, count in dominant_loss_counts.items():
                    percentage = count / len(problematic_samples) * 100
                    f.write(f"- **{loss_type}**: {count} 个样本 ({percentage:.1f}%)\n")
                f.write("\n")

            f.write("\n## 问题样本详情\n\n")
            if len(problematic_samples) > 0:
                f.write("### 前20个最严重的问题样本\n\n")
                f.write("| 样本ID | 文件路径 | BG/FG比例 | 总损失 | 感知损失 | MS-SSIM损失 | L1损失 | L2损失 | PSNR | SSIM | 主导损失 | 问题类型 |\n")
                f.write("|--------|----------|-----------|--------|----------|-------------|--------|--------|------|------|----------|----------|\n")

                top_problematic = problematic_samples.head(20)
                for _, row in top_problematic.iterrows():
                    # file_info can be None/NaN when no path info was available;
                    # NaN is truthy, so guard with an explicit dict check.
                    file_info = row.get('file_info', {})
                    if isinstance(file_info, dict) and file_info:
                        file_path = file_info.get('sample_name', f"sample_{row['sample_idx']}")
                    else:
                        file_path = f"sample_{row['sample_idx']}"
                    dominant_loss = row.get('dominant_loss', 'Unknown')

                    f.write(f"| {row['sample_idx']} | {file_path} | {row['bg_fg_ratio']:.3f} | "
                           f"{row['total_loss']:.4f} | {row.get('perceptual_loss', 0):.4f} | "
                           f"{row.get('ms_ssim_loss', 0):.4f} | {row.get('l1_loss', 0):.4f} | "
                           f"{row.get('l2_loss', 0):.4f} | {row['mpsnr']:.2f} | "
                           f"{row['mssim']:.4f} | {dominant_loss} | {row['issue_type'].strip()} |\n")
            else:
                f.write("未发现明显的问题样本。\n")

            f.write("\n## 如何查找问题样本\n\n")
            f.write("1. 查看 `problematic_samples.csv` 文件获取问题样本列表\n")
            f.write("2. 使用 `sample_mapping.json` 文件通过样本ID查找对应的文件路径\n")
            f.write("3. 样本ID对应数据集中的索引位置\n\n")

            # Recommendations derived from the aggregate statistics.
            avg_psnr = stats['metrics_stats'].get('mpsnr', {}).get('mean', 0)
            avg_ssim = stats['metrics_stats'].get('mssim', {}).get('mean', 0)

            if avg_psnr > 30 and avg_ssim > 0.8:
                f.write("- **模型整体性能良好**，大部分样本的重建质量较高。\n")
            elif avg_psnr > 20 and avg_ssim > 0.6:
                f.write("- **模型性能中等**，仍有改进空间。\n")
            else:
                f.write("- **模型性能有待提升**，建议调整模型架构或训练策略。\n")

            if len(problematic_samples) / stats['total_samples'] > 0.2:
                f.write("- **问题样本比例较高**，建议重点关注数据质量和预处理步骤。\n")

            # High BG/FG-ratio variance hints at an imbalanced dataset.
            if bg_stats['std'] > bg_stats['mean']:
                f.write("- **背景/前景比例变化较大**，建议针对不同比例的样本调整训练策略。\n")

    def find_sample_by_id(self, sample_id):
        """Return the result dict for ``sample_id``, or None if absent."""
        for result in self.results:
            if result['sample_idx'] == sample_id:
                return result
        return None

    def get_problematic_sample_paths(self):
        """Return file-path/metric summaries for the problematic samples."""
        problematic_samples = self.identify_problematic_samples()
        sample_paths = []

        for _, row in problematic_samples.iterrows():
            sample_id = row['sample_idx']
            result = self.find_sample_by_id(sample_id)

            if result and result.get('file_info'):
                file_info = result['file_info']
                sample_paths.append({
                    'sample_id': sample_id,
                    'file_path': file_info.get('image_path', 'Unknown'),
                    'sample_name': file_info.get('sample_name', f'sample_{sample_id}'),
                    'metrics': {
                        'bg_fg_ratio': row['bg_fg_ratio'],
                        'total_loss': row['total_loss'],
                        'mpsnr': row['mpsnr'],
                        'mssim': row['mssim']
                    }
                })

        return sample_paths

    def generate_statistics(self):
        """Return aggregate statistics over ``self.results`` (None if empty)."""
        if not self.results:
            print("没有分析结果可用于统计")
            return None

        df = pd.DataFrame(self.results)

        stats = {
            'total_samples': len(self.results),
            'has_segmentation': df['has_segmentation'].any(),
            'metrics_stats': {},
            'bg_fg_ratio_stats': {
                'mean': df['bg_fg_ratio'].mean(),
                'std': df['bg_fg_ratio'].std(),
                'median': df['bg_fg_ratio'].median(),
                'min': df['bg_fg_ratio'].min(),
                'max': df['bg_fg_ratio'].max(),
                'q25': df['bg_fg_ratio'].quantile(0.25),
                'q75': df['bg_fg_ratio'].quantile(0.75)
            }
        }

        # Per-metric descriptive statistics.
        metrics = ['total_loss', 'perceptual_loss', 'ms_ssim_loss', 'l1_loss', 'l2_loss', 'mpsnr', 'mssim']

        # Include segmentation metrics when any sample produced them.
        if stats['has_segmentation']:
            seg_metrics = ['seg_iou', 'seg_dice', 'seg_pixel_accuracy', 'seg_sensitivity', 'seg_specificity']
            metrics.extend(seg_metrics)

        for metric in metrics:
            if metric in df.columns:
                stats['metrics_stats'][metric] = {
                    'mean': df[metric].mean(),
                    'std': df[metric].std(),
                    'median': df[metric].median(),
                    'min': df[metric].min(),
                    'max': df[metric].max(),
                    'q25': df[metric].quantile(0.25),
                    'q75': df[metric].quantile(0.75)
                }

        return stats

    def identify_problematic_samples(self, criteria=None):
        """Identify poorly-performing samples; returns a sorted DataFrame.

        Args:
            criteria: optional dict of thresholds; defaults to quartile-based
                thresholds computed from the current results.
        """
        if not self.results:
            print("没有分析结果可用于识别问题样本")
            # Empty DataFrame (not a list) so callers can use len()/.iterrows().
            return pd.DataFrame()

        df = pd.DataFrame(self.results)

        # Default thresholds: worst quartile per metric.
        if criteria is None:
            criteria = {
                'mpsnr_threshold': df['mpsnr'].quantile(0.25),
                'mssim_threshold': df['mssim'].quantile(0.25),
                'loss_threshold': df['total_loss'].quantile(0.75),
            }

            # Segmentation thresholds, when segmentation results exist.
            if df['has_segmentation'].any():
                criteria.update({
                    'seg_iou_threshold': df['seg_iou'].quantile(0.25),
                    'seg_dice_threshold': df['seg_dice'].quantile(0.25),
                })

        # Base selection: any reconstruction metric in its worst quartile.
        problematic_mask = (
            (df['mpsnr'] < criteria['mpsnr_threshold']) |
            (df['mssim'] < criteria['mssim_threshold']) |
            (df['total_loss'] > criteria['loss_threshold'])
        )

        # Extend with segmentation-based selection when applicable.
        if df['has_segmentation'].any() and 'seg_iou_threshold' in criteria:
            problematic_mask |= (
                (df['seg_iou'] < criteria['seg_iou_threshold']) |
                (df['seg_dice'] < criteria['seg_dice_threshold'])
            )

        problematic_samples = df[problematic_mask].copy()

        # Tag each sample with the reasons it was selected.
        problematic_samples['issue_type'] = ''
        problematic_samples.loc[df['mpsnr'] < criteria['mpsnr_threshold'], 'issue_type'] += 'Low_PSNR '
        problematic_samples.loc[df['mssim'] < criteria['mssim_threshold'], 'issue_type'] += 'Low_SSIM '
        problematic_samples.loc[df['total_loss'] > criteria['loss_threshold'], 'issue_type'] += 'High_Loss '

        if df['has_segmentation'].any() and 'seg_iou_threshold' in criteria:
            problematic_samples.loc[df['seg_iou'] < criteria['seg_iou_threshold'], 'issue_type'] += 'Low_IoU '
            problematic_samples.loc[df['seg_dice'] < criteria['seg_dice_threshold'], 'issue_type'] += 'Low_Dice '

        # Identify which loss component dominates each problematic sample.
        loss_components = ['perceptual_loss', 'ms_ssim_loss', 'l1_loss', 'l2_loss']
        available_loss_components = [comp for comp in loss_components if comp in problematic_samples.columns]

        if available_loss_components:
            dominant_loss = problematic_samples[available_loss_components].idxmax(axis=1)
            problematic_samples['dominant_loss'] = dominant_loss.map({
                'perceptual_loss': 'Perceptual',
                'ms_ssim_loss': 'MS-SSIM',
                'l1_loss': 'L1',
                'l2_loss': 'L2'
            })
        else:
            problematic_samples['dominant_loss'] = 'Unknown'

        # Worst samples first.
        problematic_samples = problematic_samples.sort_values('total_loss', ascending=False)

        print(f"识别出 {len(problematic_samples)} 个问题样本 (占总数的 {len(problematic_samples)/len(df)*100:.1f}%)")

        return problematic_samples

    def plot_analysis(self, output_dir):
        """Generate all analysis figures (scatter, histograms, box plots)."""
        if not self.results:
            print("没有分析结果可用于绘图")
            return

        output_dir = Path(output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

        df = pd.DataFrame(self.results)

        # Plot style.
        plt.style.use('default')
        sns.set_palette("husl")

        # 1. Base analysis charts (including box plots).
        self._create_basic_plots_with_boxplot(output_dir, df)

        # 2. Correlation heatmap.
        self._create_correlation_heatmap(output_dir, df)

        # 3. Problematic-sample distributions.
        self._create_problematic_analysis(output_dir, df)

    def _create_basic_plots_with_boxplot(self, output_dir, df):
        """Create the base analysis figure, including box-plot rows."""
        has_seg = df['has_segmentation'].any()

        # Extra row when segmentation metrics are present.
        if has_seg:
            fig, axes = plt.subplots(4, 3, figsize=(20, 20))
            fig.suptitle('Dataset Analysis: Background/Foreground Ratio vs Model Performance (含分割分析)', fontsize=16)
        else:
            fig, axes = plt.subplots(3, 3, figsize=(20, 16))
            fig.suptitle('Dataset Analysis: Background/Foreground Ratio vs Model Performance (含箱型图)', fontsize=16)

        # Drop samples whose BG/FG ratio is infinite (no foreground).
        finite_mask = np.isfinite(df['bg_fg_ratio'])
        df_finite = df[finite_mask]

        # Row 1: reconstruction metrics vs BG/FG ratio (scatter).
        metrics = ['total_loss', 'mpsnr', 'mssim']
        metric_labels = ['Total Loss', 'PSNR (dB)', 'SSIM']

        for i, (metric, label) in enumerate(zip(metrics, metric_labels)):
            ax = axes[0, i]
            scatter = ax.scatter(df_finite['bg_fg_ratio'], df_finite[metric], 
                               alpha=0.6, s=20, c=df_finite[metric], cmap='viridis')
            ax.set_xlabel('Background/Foreground Ratio')
            ax.set_ylabel(label)
            ax.set_title(f'{label} vs BG/FG Ratio')
            ax.grid(True, alpha=0.3)
            plt.colorbar(scatter, ax=ax)

            if len(df_finite) > 1:
                correlation = np.corrcoef(df_finite['bg_fg_ratio'], df_finite[metric])[0, 1]
                ax.text(0.05, 0.95, f'Corr: {correlation:.3f}', 
                       transform=ax.transAxes, fontsize=10,
                       bbox=dict(boxstyle="round,pad=0.3", facecolor="white", alpha=0.8))

        # Row 2: reconstruction metric histograms.
        for i, (metric, label) in enumerate(zip(metrics, metric_labels)):
            ax = axes[1, i]
            ax.hist(df_finite[metric], bins=50, alpha=0.7, edgecolor='black')
            ax.set_xlabel(label)
            ax.set_ylabel('Frequency')
            ax.set_title(f'{label} Distribution')
            ax.grid(True, alpha=0.3)

            mean_val = df_finite[metric].mean()
            std_val = df_finite[metric].std()
            ax.axvline(mean_val, color='red', linestyle='--', label=f'Mean: {mean_val:.3f}')
            ax.axvline(mean_val + std_val, color='orange', linestyle=':', label=f'+1σ: {mean_val + std_val:.3f}')
            ax.axvline(mean_val - std_val, color='orange', linestyle=':', label=f'-1σ: {mean_val - std_val:.3f}')
            ax.legend()

        # Row 3: segmentation metrics (only when available).
        if has_seg:
            seg_metrics = ['seg_iou', 'seg_dice', 'seg_pixel_accuracy']
            seg_labels = ['IoU', 'Dice Coefficient', 'Pixel Accuracy']

            for i, (metric, label) in enumerate(zip(seg_metrics, seg_labels)):
                ax = axes[2, i]
                scatter = ax.scatter(df_finite['bg_fg_ratio'], df_finite[metric], 
                                   alpha=0.6, s=20, c=df_finite[metric], cmap='plasma')
                ax.set_xlabel('Background/Foreground Ratio')
                ax.set_ylabel(label)
                ax.set_title(f'{label} vs BG/FG Ratio')
                ax.grid(True, alpha=0.3)
                plt.colorbar(scatter, ax=ax)

                if len(df_finite) > 1:
                    correlation = np.corrcoef(df_finite['bg_fg_ratio'], df_finite[metric])[0, 1]
                    ax.text(0.05, 0.95, f'Corr: {correlation:.3f}', 
                           transform=ax.transAxes, fontsize=10,
                           bbox=dict(boxstyle="round,pad=0.3", facecolor="white", alpha=0.8))

            # Box plots go in row 4.
            row_idx = 3
        else:
            # Box plots go in row 3.
            row_idx = 2

        # Box-plot row.
        all_metrics = ['bg_fg_ratio', 'mpsnr', 'mssim', 'total_loss']
        if has_seg:
            all_metrics.extend(['seg_iou', 'seg_dice'])

        available_metrics = [m for m in all_metrics if m in df_finite.columns]

        # Z-score normalize so metrics with different scales are comparable.
        df_normalized = df_finite[available_metrics].copy()
        for metric in available_metrics:
            mean_val = df_normalized[metric].mean()
            std_val = df_normalized[metric].std()
            if std_val > 0:
                df_normalized[metric] = (df_normalized[metric] - mean_val) / std_val

        box_data = [df_normalized[metric].dropna() for metric in available_metrics]
        bp1 = axes[row_idx, 0].boxplot(box_data, labels=available_metrics, patch_artist=True)

        colors = ['lightblue', 'lightgreen', 'lightcoral', 'lightyellow', 'lightpink', 'lightgray']
        for patch, color in zip(bp1['boxes'], colors[:len(available_metrics)]):
            patch.set_facecolor(color)
            patch.set_alpha(0.7)

        axes[row_idx, 0].set_ylabel('Normalized Values')
        axes[row_idx, 0].set_title('All Metrics Box Plot (Normalized)')
        axes[row_idx, 0].grid(True, alpha=0.3)
        axes[row_idx, 0].tick_params(axis='x', rotation=45)

        # Dedicated BG/FG-ratio box plot (log scale: ratios span decades).
        bp2 = axes[row_idx, 1].boxplot([df_finite['bg_fg_ratio'].dropna()], patch_artist=True)
        bp2['boxes'][0].set_facecolor('lightblue')
        bp2['boxes'][0].set_alpha(0.7)
        axes[row_idx, 1].set_ylabel('BG/FG Ratio')
        axes[row_idx, 1].set_title('BG/FG Ratio Box Plot')
        axes[row_idx, 1].set_yscale('log')
        axes[row_idx, 1].grid(True, alpha=0.3)

        # Performance-metric comparison box plot.
        if has_seg:
            performance_metrics = ['mpsnr', 'mssim', 'seg_iou', 'seg_dice']
        else:
            performance_metrics = ['mpsnr', 'mssim']

        performance_data = [df_finite[metric].dropna() for metric in performance_metrics if metric in df_finite.columns]

        if performance_data:
            bp3 = axes[row_idx, 2].boxplot(performance_data, 
                                         labels=[m.upper() for m in performance_metrics if m in df_finite.columns], 
                                         patch_artist=True)

            perf_colors = ['lightgreen', 'lightcoral', 'lightblue', 'lightyellow']
            for patch, color in zip(bp3['boxes'], perf_colors[:len(performance_data)]):
                patch.set_facecolor(color)
                patch.set_alpha(0.7)

            axes[row_idx, 2].set_ylabel('Values')
            axes[row_idx, 2].set_title('Performance Metrics Box Plot')
            axes[row_idx, 2].grid(True, alpha=0.3)
            axes[row_idx, 2].tick_params(axis='x', rotation=45)

        plt.tight_layout()
        plt.savefig(output_dir / 'bg_fg_analysis_with_boxplot.png', dpi=300, bbox_inches='tight')
        plt.close()

    def _create_correlation_heatmap(self, output_dir, df):
        """Create the metric correlation heatmap."""
        correlation_metrics = ['bg_fg_ratio', 'total_loss', 'mpsnr', 'mssim', 'perceptual_loss', 'l1_loss', 'l2_loss']
        available_metrics = [m for m in correlation_metrics if m in df.columns]

        if len(available_metrics) > 2:
            corr_matrix = df[available_metrics].corr()

            plt.figure(figsize=(10, 8))
            sns.heatmap(corr_matrix, annot=True, cmap='RdBu_r', center=0, 
                       square=True, fmt='.3f', cbar_kws={'label': 'Correlation'})
            plt.title('Correlation Matrix: BG/FG Ratio vs Model Metrics')
            plt.tight_layout()
            plt.savefig(output_dir / 'correlation_heatmap.png', dpi=300, bbox_inches='tight')
            plt.close()

    def _create_problematic_analysis(self, output_dir, df):
        """Create distribution plots contrasting problematic vs all samples."""
        problematic_samples = self.identify_problematic_samples()
        if len(problematic_samples) > 0:
            # (A stray plt.figure() previously created here leaked a figure.)
            fig, axes = plt.subplots(2, 2, figsize=(15, 12))
            fig.suptitle('Problematic Samples Analysis', fontsize=16)

            # BG/FG-ratio distribution: problematic vs all samples.
            axes[0, 0].hist(df['bg_fg_ratio'], bins=50, alpha=0.5, label=f'All Samples (n={len(df)})', density=True)
            axes[0, 0].hist(problematic_samples['bg_fg_ratio'], bins=30, alpha=0.7, label=f'Problematic (n={len(problematic_samples)})', density=True)
            axes[0, 0].set_xlabel('BG/FG Ratio')
            axes[0, 0].set_ylabel('Probability Density')
            axes[0, 0].set_title('BG/FG Ratio Distribution (Normalized)')
            axes[0, 0].grid(True, alpha=0.3)

            # Mean markers (legend is built afterwards so these labels show).
            all_mean = df['bg_fg_ratio'].mean()
            prob_mean = problematic_samples['bg_fg_ratio'].mean()
            axes[0, 0].axvline(all_mean, color='blue', linestyle='--', alpha=0.7, label=f'All Mean: {all_mean:.2f}')
            axes[0, 0].axvline(prob_mean, color='red', linestyle='--', alpha=0.7, label=f'Prob Mean: {prob_mean:.2f}')
            axes[0, 0].legend()

            # PSNR distribution.
            axes[0, 1].hist(df['mpsnr'], bins=50, alpha=0.5, label='All Samples', density=True)
            axes[0, 1].hist(problematic_samples['mpsnr'], bins=30, alpha=0.7, label='Problematic', density=True)
            axes[0, 1].set_xlabel('PSNR (dB)')
            axes[0, 1].set_ylabel('Density')
            axes[0, 1].set_title('PSNR Distribution')
            axes[0, 1].legend()
            axes[0, 1].grid(True, alpha=0.3)

            # SSIM distribution.
            axes[1, 0].hist(df['mssim'], bins=50, alpha=0.5, label='All Samples', density=True)
            axes[1, 0].hist(problematic_samples['mssim'], bins=30, alpha=0.7, label='Problematic', density=True)
            axes[1, 0].set_xlabel('SSIM')
            axes[1, 0].set_ylabel('Density')
            axes[1, 0].set_title('SSIM Distribution')
            axes[1, 0].legend()
            axes[1, 0].grid(True, alpha=0.3)

            # Total-loss distribution.
            axes[1, 1].hist(df['total_loss'], bins=50, alpha=0.5, label='All Samples', density=True)
            axes[1, 1].hist(problematic_samples['total_loss'], bins=30, alpha=0.7, label='Problematic', density=True)
            axes[1, 1].set_xlabel('Total Loss')
            axes[1, 1].set_ylabel('Density')
            axes[1, 1].set_title('Total Loss Distribution')
            axes[1, 1].legend()
            axes[1, 1].grid(True, alpha=0.3)

            plt.tight_layout()
            plt.savefig(output_dir / 'problematic_samples_analysis.png', dpi=300, bbox_inches='tight')
            plt.close()


def load_experiment_config(experiment_dir):
    """Load the experiment configuration YAML from an experiment directory.

    Expects ``<experiment_dir>/experiment_config.yaml`` to exist and raises
    FileNotFoundError otherwise. Returns the parsed configuration mapping.
    """
    cfg_file = Path(experiment_dir) / "experiment_config.yaml"
    if not cfg_file.exists():
        raise FileNotFoundError(f"Config file not found: {cfg_file}")

    with open(cfg_file, 'r', encoding='utf-8') as fh:
        return yaml.safe_load(fh)


def find_checkpoint(experiment_dir, ckpt_filename=None):
    """Locate a checkpoint file under ``<experiment_dir>/checkpoints``.

    When *ckpt_filename* is given, that exact file must exist and its path is
    returned. Otherwise the most recently modified ``*.ckpt`` file is chosen.
    Raises FileNotFoundError when the directory or a suitable file is missing.
    Returns the checkpoint path as a string.
    """
    ckpt_dir = Path(experiment_dir) / "checkpoints"
    if not ckpt_dir.exists():
        raise FileNotFoundError(f"Checkpoints directory not found: {ckpt_dir}")

    # An explicitly named checkpoint must exist — no silent fallback.
    if ckpt_filename:
        candidate = ckpt_dir / ckpt_filename
        if candidate.exists():
            return str(candidate)
        raise FileNotFoundError(f"Specified checkpoint not found: {candidate}")

    # No name given: pick the newest checkpoint by modification time.
    candidates = sorted(ckpt_dir.glob("*.ckpt"), key=lambda p: p.stat().st_mtime)
    if not candidates:
        raise FileNotFoundError(f"No checkpoint files found in: {ckpt_dir}")
    return str(candidates[-1])


def _resolve_dataset_path(cli_path, dataset_config):
    """Resolve which dataset to analyze and label its origin.

    Priority: explicit CLI path > config ``val_file`` > config ``train_file``
    > the bare base directory from the config. Relative val/train file names
    are joined onto the base directory. Returns ``(path, source_label)``.
    """
    if cli_path and cli_path.strip():
        return cli_path, "用户指定"

    # NOTE(review): the original duplicated this whole resolution block twice,
    # once reading 'dataset_path' and once 'train_dir' from the config.
    # Support both keys here, preferring 'dataset_path' — confirm which key
    # the experiment configs actually use.
    base = dataset_config.get('dataset_path', '') or dataset_config.get('train_dir', '')

    def _join(rel):
        # Absolute paths from the config are taken as-is.
        if Path(rel).is_absolute():
            return rel
        return str(Path(base) / rel) if base else rel

    val_file = dataset_config.get('val_file', '')
    train_file = dataset_config.get('train_file', '')
    if val_file and val_file.strip():
        return _join(val_file), "验证集"
    if train_file and train_file.strip():
        return _join(train_file), "训练集"
    return base, "基础数据集"


def _load_matching_weights(model, checkpoint_path, device):
    """Load checkpoint weights whose parameter names match *model*.

    Strips the Lightning ``model.`` prefix from checkpoint keys, loads only
    the intersection of parameter names (``strict=False``), and prints a
    short report of matching / missing / unexpected parameters.
    """
    checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))
    # Keep only 'model.*' entries and drop the 6-char 'model.' prefix.
    state_dict = {k[6:]: v for k, v in checkpoint['state_dict'].items() if k.startswith('model.')}

    model_keys = set(model.state_dict().keys())
    checkpoint_keys = set(state_dict.keys())

    matching_keys = model_keys & checkpoint_keys
    missing_keys = model_keys - checkpoint_keys
    unexpected_keys = checkpoint_keys - model_keys

    filtered_state_dict = {k: v for k, v in state_dict.items() if k in matching_keys}

    print(f"Loading model weights:")
    print(f"  - Matching parameters: {len(matching_keys)}")
    print(f"  - Missing parameters: {len(missing_keys)}")
    print(f"  - Unexpected parameters: {len(unexpected_keys)}")

    if missing_keys:
        print(f"  - Missing keys: {list(missing_keys)[:5]}{'...' if len(missing_keys) > 5 else ''}")
    if unexpected_keys:
        print(f"  - Unexpected keys: {list(unexpected_keys)[:5]}{'...' if len(unexpected_keys) > 5 else ''}")

    # strict=False so partial (architecture-mismatched) checkpoints still load.
    model.load_state_dict(filtered_state_dict, strict=False)


def main():
    """CLI entry point: run the full dataset analysis for one experiment."""
    parser = argparse.ArgumentParser(description='数据集全面分析工具')

    parser.add_argument('--experiment_dir', type=str,
                        default=r"D:/code/work/pabone/thesis_code/pa_us_fusion_v2/logs/lightning_logs/version_70",
                        help='实验目录路径')
    parser.add_argument('--ckpt_filename', type=str, default=None,
                        help='指定检查点文件名')
    # BUG FIX: the original passed ``default=r''`` twice here — a repeated
    # keyword argument is a SyntaxError, so the script could not even run.
    parser.add_argument('--dataset_path', type=str,
                        default=r'',
                        help='数据集路径')
    parser.add_argument('--max_samples', type=int, default=None,
                        help='最大分析样本数（None表示全部）')
    parser.add_argument('--output_dir', type=str,
                        default="dataset_analysis",
                        help='输出目录')
    parser.add_argument('--device', type=str, default="cpu",
                        help='运行设备')
    parser.add_argument('--batch_size', type=int, default=1,
                        help='批处理大小')
    parser.add_argument('--bg_threshold', type=float, default=0.0,
                        help='背景/前景分割阈值')
    parser.add_argument('--data_range', type=float, default=2.0,
                        help='数据范围，用于PSNR计算')

    args = parser.parse_args()

    # Load the experiment configuration and split it into sections.
    config = load_experiment_config(args.experiment_dir)
    dataset_config = config.get('data', {})
    model_config = config.get('model', {})

    # Resolve the dataset path once (the original did this twice, verbatim).
    dataset_path, dataset_type = _resolve_dataset_path(args.dataset_path, dataset_config)
    print(f"使用数据集: {dataset_type} - {dataset_path}")

    if not Path(dataset_path).exists():
        raise FileNotFoundError(f"数据集路径不存在: {dataset_path}")

    modality = dataset_config.get('modality', '')

    # Model hyper-parameters with the same defaults as the original.
    model_type = model_config.get('model_type', 'myunet')
    in_channels = model_config.get('in_channels', 1)
    up_mode = model_config.get('up_mode', 'upsample')
    merge_mode = model_config.get('merge_mode', 'concat')

    # Loss-term weights, all defaulting to 1.0.
    lambda_perc = model_config.get('loss_perc', 1.0)
    lambda_msssim = model_config.get('loss_msssim', 1.0)
    lambda_l2 = model_config.get('loss_l2', 1.0)
    lambda_l1 = model_config.get('loss_l1', 1.0)

    # Locate the checkpoint to evaluate.
    checkpoint_path = find_checkpoint(args.experiment_dir, args.ckpt_filename)
    print(f"Using checkpoint: {checkpoint_path}")

    # Build the requested model architecture.
    if model_type.lower() == 'aynet':
        model = AYNet(in_channels=in_channels, up_mode=up_mode, merge_mode=merge_mode)
    elif model_type.lower() == 'myunet':
        model = HighResolutionModel()
    else:
        raise ValueError(f"Unsupported model type: {model_type}")

    # Load weights once (the original repeated the whole procedure twice).
    _load_matching_weights(model, checkpoint_path, args.device)
    model.to(args.device)

    # Composite loss with the configured term weights.
    loss_fn = ComprehensiveLoss(lambda_perc=lambda_perc, lambda_msssim=lambda_msssim, lambda_l2=lambda_l2, lambda_l1=lambda_l1)

    # Dataset preprocessing: resize, tensorize, normalize to roughly [-1, 1].
    image_transform = transforms.Compose([
        transforms.Resize((256, 128)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5], std=[0.5])
    ])
    sensor_transform = partial(preprocess_sensor_data,
                              target_shape=(2560, 64))

    dataset = PaUsDataset(dataset_path,
                         image_transform=image_transform,
                         sensor_transform=sensor_transform)

    print(f"数据集加载完成，共 {len(dataset)} 个样本")

    # The segmentation metrics are only meaningful if the model has a head.
    has_segmentation = hasattr(model, 'segmentation_head')

    print(f"模型类型: {model_type}")
    print(f"是否包含分割头: {has_segmentation}")

    analyzer = DatasetAnalyzer(
        model=model,
        loss_fn=loss_fn,
        device=args.device,
        bg_threshold=args.bg_threshold,
        data_range=args.data_range,
        has_segmentation=has_segmentation
    )

    # Run the analysis, render plots, and persist results.
    results = analyzer.analyze_dataset(dataset, max_samples=args.max_samples, batch_size=args.batch_size)
    analyzer.plot_analysis(args.output_dir)
    output_dir = analyzer.save_results(args.output_dir)

    # Print a human-readable summary of the run.
    stats = analyzer.generate_statistics()
    problematic_samples = analyzer.identify_problematic_samples()

    print(f"\n=== 分析完成 ===")
    print(f"总样本数: {stats['total_samples']}")
    print(f"问题样本数: {len(problematic_samples)} ({len(problematic_samples)/stats['total_samples']*100:.1f}%)")
    print(f"平均PSNR: {stats['metrics_stats']['mpsnr']['mean']:.2f} dB")
    print(f"平均SSIM: {stats['metrics_stats']['mssim']['mean']:.4f}")
    print(f"平均背景/前景比例: {stats['bg_fg_ratio_stats']['mean']:.3f}")
    print(f"\n结果已保存至: {output_dir}")
    print(f"- 分析报告: {output_dir / 'analysis_report.md'}")
    print(f"- 问题样本列表: {output_dir / 'problematic_samples.csv'}")
    print(f"- 可视化图表: {output_dir / '*.png'}")

# Script entry point: run the dataset analysis only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()
