#!/usr/bin/env python3
"""
分析EchoNet-Dynamic元数据
分析VolumeTracings.csv和FileList.csv的结构，确定心内膜/心外膜区分方式
"""

import pandas as pd
import numpy as np
from pathlib import Path
from typing import Dict, List, Tuple, Optional
import json
import sys

# Add the project root to sys.path so `src.*` imports resolve when this
# script is executed directly (the file lives two directory levels below
# the project root).
project_root = Path(__file__).parent.parent.parent
sys.path.insert(0, str(project_root))

from src.utils.logger import get_logger

# Module-level logger tagged with this script's name.
logger = get_logger("AnalyzeEchoNetMetadata")


def analyze_volumetracings(csv_path: str) -> Dict:
    """
    Analyze the structure of VolumeTracings.csv.

    Args:
        csv_path: Path to VolumeTracings.csv.

    Returns:
        Analysis dictionary with columns/dtypes, row counts, coordinate
        ranges, per-video annotation statistics, and hints on whether the
        annotations distinguish endocardium from epicardium. Returns an
        empty dict if the file does not exist.
    """
    logger.info(f"分析VolumeTracings.csv: {csv_path}")

    if not Path(csv_path).exists():
        logger.error(f"文件不存在: {csv_path}")
        return {}

    df = pd.read_csv(csv_path)

    analysis = {
        'columns': list(df.columns),
        'dtypes': {col: str(dtype) for col, dtype in df.dtypes.items()},
        'total_rows': len(df),
        'unique_videos': df['FileName'].nunique() if 'FileName' in df.columns else 0,
        'unique_frames': df['Frame'].nunique() if 'Frame' in df.columns else 0,
        'frame_range': {
            # Guard against an empty CSV: int(nan) would raise ValueError.
            'min': int(df['Frame'].min()) if 'Frame' in df.columns and not df.empty else None,
            'max': int(df['Frame'].max()) if 'Frame' in df.columns and not df.empty else None
        },
        'coordinate_ranges': {}
    }

    # Coordinate ranges (was four copy-pasted blocks for X1/Y1/X2/Y2).
    for coord in ('X1', 'Y1', 'X2', 'Y2'):
        if coord in df.columns:
            analysis['coordinate_ranges'][coord] = {
                'min': float(df[coord].min()),
                'max': float(df[coord].max()),
                'mean': float(df[coord].mean())
            }

    # Distribution of annotated frames per video.
    if 'FileName' in df.columns and 'Frame' in df.columns:
        video_frame_counts = df.groupby('FileName')['Frame'].nunique()
        analysis['frames_per_video'] = {
            'min': int(video_frame_counts.min()),
            'max': int(video_frame_counts.max()),
            'mean': float(video_frame_counts.mean()),
            'median': float(video_frame_counts.median())
        }

        # Number of annotation points per video.
        video_point_counts = df.groupby('FileName').size()
        analysis['points_per_video'] = {
            'min': int(video_point_counts.min()),
            'max': int(video_point_counts.max()),
            'mean': float(video_point_counts.mean()),
            'median': float(video_point_counts.median())
        }

    # Look for an explicit endocardium/epicardium discriminator column.
    potential_endo_epi_fields = [col for col in df.columns if any(
        keyword in col.lower() for keyword in ['endo', 'epi', 'inner', 'outer', 'type', 'contour']
    )]
    analysis['potential_endo_epi_fields'] = potential_endo_epi_fields

    # No explicit field: try to infer from duplicated (FileName, Frame)
    # records, which may represent separate endo-/epicardial contours.
    if not potential_endo_epi_fields and 'X1' in df.columns and 'Y1' in df.columns:
        frame_counts = df.groupby(['FileName', 'Frame']).size()
        duplicate_frames = frame_counts[frame_counts > 1]
        if len(duplicate_frames) > 0:
            analysis['has_duplicate_frames'] = True
            analysis['duplicate_frame_count'] = len(duplicate_frames)
            analysis['note'] = "发现同一视频同一帧有多条记录，可能表示心内膜和心外膜"
        else:
            analysis['has_duplicate_frames'] = False
            analysis['note'] = "未发现重复帧，可能只有一种轮廓类型"

    logger.info(f"分析完成: {analysis['unique_videos']} 个视频, {analysis['unique_frames']} 个唯一帧")
    return analysis


def analyze_filelist(csv_path: str) -> Dict:
    """
    Analyze the structure of FileList.csv and extract EF metadata.

    Args:
        csv_path: Path to FileList.csv.

    Returns:
        Analysis dictionary with columns/dtypes, ejection-fraction (EF)
        statistics, and train/val/test split distribution. Returns an
        empty dict if the file does not exist.
    """
    logger.info(f"分析FileList.csv: {csv_path}")

    if not Path(csv_path).exists():
        logger.error(f"文件不存在: {csv_path}")
        return {}

    df = pd.read_csv(csv_path)

    # Locate the EF column by substring match (e.g. 'EF', 'EjectionFraction').
    ef_columns = [col for col in df.columns if 'EF' in col.upper() or 'EJECTION' in col.upper()]

    analysis = {
        'columns': list(df.columns),
        'dtypes': {col: str(dtype) for col, dtype in df.dtypes.items()},
        'total_rows': len(df),
        # Bug fix: 'has_ef' previously required the column to be named
        # exactly 'EF' or 'EjectionFraction', while the statistics below
        # matched any column containing 'EF'/'EJECTION' — the two could
        # disagree. Both now use the same substring criterion.
        'has_ef': bool(ef_columns),
        'ef_column': None,
        'ef_statistics': {}
    }

    if ef_columns:
        analysis['ef_column'] = ef_columns[0]
        ef_data = df[ef_columns[0]]
        analysis['ef_statistics'] = {
            'min': float(ef_data.min()),
            'max': float(ef_data.max()),
            'mean': float(ef_data.mean()),
            'median': float(ef_data.median()),
            'std': float(ef_data.std()),
            'non_null_count': int(ef_data.notna().sum())
        }

    # Train/val/test split column, if present.
    if 'Split' in df.columns:
        analysis['has_split'] = True
        analysis['split_distribution'] = df['Split'].value_counts().to_dict()
    else:
        analysis['has_split'] = False

    # Every non-FileName column is treated as metadata.
    metadata_columns = [col for col in df.columns if col not in ['FileName']]
    analysis['metadata_columns'] = metadata_columns

    logger.info(f"分析完成: {len(df)} 个视频文件")
    if analysis['has_ef']:
        logger.info(f"发现EF数据: {analysis['ef_statistics']}")

    return analysis


def analyze_sample_video(df_volume: pd.DataFrame, df_filelist: Optional[pd.DataFrame], 
                         video_id: str) -> Dict:
    """
    Analyze the annotations of a single video.

    Args:
        df_volume: VolumeTracings dataframe.
        df_filelist: FileList dataframe (optional; supplies EF when present).
        video_id: Video ID without the '.avi' extension.

    Returns:
        Per-video analysis dict; empty dict when the video has no rows.
    """
    file_name = f"{video_id}.avi"
    rows = df_volume[df_volume['FileName'] == file_name]

    if rows.empty:
        return {}

    # One groupby gives both the per-frame point counts and the duplicates.
    frame_sizes = rows.groupby('Frame').size()
    frames = sorted(rows['Frame'].unique().tolist())

    result = {
        'video_id': video_id,
        'total_points': len(rows),
        'annotated_frames': frames,
        'num_annotated_frames': len(frames),
        'points_per_frame': {frame: int(frame_sizes[frame]) for frame in frames}
    }

    # Frames with more than one record may carry endo- and epicardium.
    dup_frames = frame_sizes[frame_sizes > 1].index.tolist()
    if dup_frames:
        result['has_duplicate_frames'] = True
        result['duplicate_frames'] = dup_frames
        result['note'] = "发现重复帧，可能包含心内膜和心外膜"
    else:
        result['has_duplicate_frames'] = False
        result['note'] = "每帧只有一组点，可能只有一种轮廓类型"

    # Attach the ejection fraction from FileList when available.
    if df_filelist is not None and 'FileName' in df_filelist.columns:
        match = df_filelist[df_filelist['FileName'] == file_name]
        if not match.empty:
            for ef_col in ('EF', 'EjectionFraction'):
                if ef_col in match.columns:
                    value = match[ef_col].iloc[0]
                    result['ef'] = float(value) if pd.notna(value) else None
                    break

    return result


def generate_analysis_report(volume_path: str, filelist_path: Optional[str] = None,
                           output_path: str = "docs/echonet_metadata_analysis.md",
                           sample_videos: Optional[List[str]] = None) -> None:
    """
    Generate the full analysis report (markdown plus a JSON data dump).

    Args:
        volume_path: Path to VolumeTracings.csv.
        filelist_path: Path to FileList.csv (optional).
        output_path: Path of the markdown report to write; a JSON file with
            the same stem is written next to it.
        sample_videos: Example video IDs to analyze (only the first five
            are used).
    """
    logger.info("开始生成EchoNet元数据分析报告...")

    # Analyze VolumeTracings.
    volume_analysis = analyze_volumetracings(volume_path)

    # Analyze FileList when present.
    filelist_analysis = {}
    if filelist_path and Path(filelist_path).exists():
        filelist_analysis = analyze_filelist(filelist_path)

    # Analyze example videos.
    sample_analyses = []
    if sample_videos:
        df_volume = pd.read_csv(volume_path)
        df_filelist = pd.read_csv(filelist_path) if filelist_path and Path(filelist_path).exists() else None

        for video_id in sample_videos[:5]:  # only the first five
            sample_analysis = analyze_sample_video(df_volume, df_filelist, video_id)
            if sample_analysis:
                sample_analyses.append(sample_analysis)

    # Build the markdown report.
    report_lines = [
        "# EchoNet-Dynamic 元数据分析报告\n",
        "## 1. VolumeTracings.csv 分析\n",
        f"### 基本信息\n",
        f"- **总行数**: {volume_analysis.get('total_rows', 0):,}\n",
        f"- **唯一视频数**: {volume_analysis.get('unique_videos', 0):,}\n",
        f"- **唯一帧数**: {volume_analysis.get('unique_frames', 0):,}\n",
        f"- **帧范围**: {volume_analysis.get('frame_range', {})}\n",
        f"\n### 列信息\n",
        f"- **列名**: {', '.join(volume_analysis.get('columns', []))}\n",
        f"\n### 坐标范围\n"
    ]

    for coord, ranges in volume_analysis.get('coordinate_ranges', {}).items():
        report_lines.append(f"- **{coord}**: min={ranges['min']:.2f}, max={ranges['max']:.2f}, mean={ranges['mean']:.2f}\n")

    report_lines.extend([
        f"\n### 标注统计\n",
        f"- **每视频标注帧数**: min={volume_analysis.get('frames_per_video', {}).get('min', 0)}, "
        f"max={volume_analysis.get('frames_per_video', {}).get('max', 0)}, "
        f"mean={volume_analysis.get('frames_per_video', {}).get('mean', 0):.2f}\n",
        f"- **每视频标注点数**: min={volume_analysis.get('points_per_video', {}).get('min', 0)}, "
        f"max={volume_analysis.get('points_per_video', {}).get('max', 0)}, "
        f"mean={volume_analysis.get('points_per_video', {}).get('mean', 0):.2f}\n"
    ])

    # Endocardium/epicardium analysis.
    if volume_analysis.get('potential_endo_epi_fields'):
        report_lines.append(f"\n### 心内膜/心外膜区分\n")
        report_lines.append(f"- **发现区分字段**: {', '.join(volume_analysis['potential_endo_epi_fields'])}\n")
    elif volume_analysis.get('has_duplicate_frames'):
        report_lines.append(f"\n### 心内膜/心外膜推断\n")
        report_lines.append(f"- **发现重复帧**: {volume_analysis.get('duplicate_frame_count', 0)} 个\n")
        report_lines.append(f"- **推断**: 同一视频同一帧的多条记录可能表示心内膜和心外膜轮廓\n")
    else:
        report_lines.append(f"\n### 心内膜/心外膜推断\n")
        report_lines.append(f"- **推断**: 每帧只有一组轮廓点，可能只标注了一种轮廓类型（通常是心内膜）\n")

    # FileList section.
    if filelist_analysis:
        report_lines.extend([
            f"\n## 2. FileList.csv 分析\n",
            f"### 基本信息\n",
            f"- **总行数**: {filelist_analysis.get('total_rows', 0):,}\n",
            f"- **列名**: {', '.join(filelist_analysis.get('columns', []))}\n"
        ])

        if filelist_analysis.get('has_ef'):
            ef_stats = filelist_analysis.get('ef_statistics', {})
            report_lines.extend([
                f"\n### 射血分数(EF)统计\n",
                f"- **EF列名**: {filelist_analysis.get('ef_column')}\n",
                f"- **范围**: {ef_stats.get('min', 0):.2f} - {ef_stats.get('max', 0):.2f}\n",
                f"- **均值**: {ef_stats.get('mean', 0):.2f} ± {ef_stats.get('std', 0):.2f}\n",
                f"- **中位数**: {ef_stats.get('median', 0):.2f}\n",
                f"- **非空数量**: {ef_stats.get('non_null_count', 0)}\n"
            ])

        if filelist_analysis.get('has_split'):
            report_lines.extend([
                f"\n### 数据划分\n",
                f"- **划分分布**: {filelist_analysis.get('split_distribution', {})}\n"
            ])

    # Example-video section.
    if sample_analyses:
        report_lines.extend([
            f"\n## 3. 示例视频分析\n"
        ])
        for sample in sample_analyses:
            report_lines.extend([
                f"\n### 视频 {sample['video_id']}\n",
                f"- **标注帧数**: {sample['num_annotated_frames']}\n",
                f"- **标注帧列表**: {sample['annotated_frames'][:10]}{'...' if len(sample['annotated_frames']) > 10 else ''}\n",
                f"- **总点数**: {sample['total_points']}\n"
            ])
            if sample.get('has_duplicate_frames'):
                report_lines.append(f"- **重复帧**: {sample.get('duplicate_frames', [])}\n")
            if sample.get('ef') is not None:
                report_lines.append(f"- **EF**: {sample['ef']:.2f}\n")
            report_lines.append(f"- **说明**: {sample.get('note', '')}\n")

    # Conclusions and recommendations.
    report_lines.extend([
        f"\n## 4. 结论和建议\n",
        f"\n### 掩码生成策略\n"
    ])

    if volume_analysis.get('has_duplicate_frames'):
        report_lines.append(
            "- **建议**: 发现同一帧有多条记录，可以尝试区分心内膜和心外膜\n"
            "- **方法**: 通过分析坐标点的分布特征（如距离中心点的距离）来区分内外膜\n"
            "- **掩码类型**: 可以生成多类别掩码（心内膜=1，心肌=2，心外膜=3）\n"
        )
    else:
        report_lines.append(
            "- **建议**: 每帧只有一组轮廓点，生成单一掩码（左心室区域）\n"
            "- **方法**: 直接使用轮廓点生成二值掩码\n"
            "- **掩码类型**: 二值掩码（左心室=255，背景=0）\n"
        )

    report_lines.extend([
        f"\n### 训练目标\n",
        f"- **分割目标**: 使用生成的掩码作为分割任务的ground truth\n",
        f"- **点提示目标**: 使用标注点坐标作为自动点提示生成器的训练目标\n",
        f"- **训练策略**: 仅在标注帧上计算损失\n"
    ])

    # Save the markdown report.
    output_file = Path(output_path)
    output_file.parent.mkdir(parents=True, exist_ok=True)
    with open(output_file, 'w', encoding='utf-8') as f:
        f.writelines(report_lines)

    logger.info(f"分析报告已保存到: {output_path}")

    # Also dump the detailed data as JSON next to the report.
    # Bug fix: str.replace('.md', '.json') rewrote the FIRST '.md' occurring
    # anywhere in the path (and silently did nothing for other suffixes);
    # with_suffix always swaps exactly the file extension.
    json_output = str(Path(output_path).with_suffix('.json'))
    analysis_data = {
        'volume_tracings': volume_analysis,
        'filelist': filelist_analysis,
        'sample_videos': sample_analyses
    }
    with open(json_output, 'w', encoding='utf-8') as f:
        json.dump(analysis_data, f, indent=2, ensure_ascii=False)

    logger.info(f"详细分析数据已保存到: {json_output}")

def main():
    """Command-line entry point: parse arguments and generate the report."""
    import argparse

    parser = argparse.ArgumentParser(description='分析EchoNet-Dynamic元数据')
    parser.add_argument('--volume_csv', type=str, 
                       default='D:/Data/EchoNet-Dynamic/VolumeTracings.csv',
                       help='VolumeTracings.csv路径')
    parser.add_argument('--filelist_csv', type=str,
                       default='D:/Data/EchoNet-Dynamic/FileList.csv',
                       help='FileList.csv路径（可选）')
    parser.add_argument('--output', type=str,
                       default='docs/echonet_metadata_analysis.md',
                       help='输出报告路径')
    parser.add_argument('--sample_videos', type=str, nargs='+',
                       help='要分析的示例视频ID列表')

    args = parser.parse_args()

    # If no sample videos were given, pick the first few from the data.
    # Bug fix: only read the CSV when it actually exists, so a missing file
    # reaches the analyzers' graceful "file not found" logging instead of
    # raising an unhandled FileNotFoundError here.
    sample_videos = args.sample_videos
    if not sample_videos and Path(args.volume_csv).exists():
        df = pd.read_csv(args.volume_csv)
        sample_videos = [vid.replace('.avi', '') for vid in df['FileName'].unique()[:5]]

    generate_analysis_report(
        volume_path=args.volume_csv,
        filelist_path=args.filelist_csv if Path(args.filelist_csv).exists() else None,
        output_path=args.output,
        sample_videos=sample_videos
    )


if __name__ == "__main__":
    main()

