#!/usr/bin/env python3
"""
Generate masks and annotation-point files from EchoNet-Dynamic labels.

For every annotated frame a segmentation mask image is produced, and the raw
contour (annotation-point) information is saved alongside it as JSON.
"""

import pandas as pd
import numpy as np
import cv2
import json
from pathlib import Path
from typing import Dict, List, Tuple, Optional
import sys
from tqdm import tqdm

# Make the project root importable so this script can be run from anywhere.
project_root = Path(__file__).parent.parent.parent
sys.path.insert(0, str(project_root))

from scripts.load_echonet_labels import (
    load_echonet_labels, 
    parse_endo_epi_contours,
    get_annotation_frames,
    load_filelist_metadata
)
from src.data.annotation_mask_generator import MaskGenerator
from src.utils.logger import get_logger

# Module-level logger shared by every function below.
logger = get_logger("GenerateEchoNetLabels")


def load_preprocessing_results(results_file: str) -> Dict:
    """Load the preprocessing results JSON.

    Returns an empty dict (after logging an error) when the file is missing.
    """
    results_path = Path(results_file)
    if not results_path.exists():
        logger.error(f"预处理结果文件不存在: {results_file}")
        return {}

    with open(results_path, 'r', encoding='utf-8') as f:
        return json.load(f)


def get_frame_mapping(frames_dir: Path) -> Dict[int, int]:
    """
    Map keyframe indices to original frame numbers.

    Prefers ``metadata.json`` inside *frames_dir*; when it is absent, falls
    back to parsing the frame number out of ``keyframe_*.png`` filenames.

    Args:
        frames_dir: Directory containing the extracted keyframes.

    Returns:
        Dict of ``{keyframe_index: original_frame_num}``.
    """
    mapping: Dict[int, int] = {}
    metadata_file = frames_dir / "metadata.json"

    if metadata_file.exists():
        with open(metadata_file, 'r', encoding='utf-8') as f:
            metadata = json.load(f)

        # Preprocessing records each keyframe's original frame number under
        # 'key_indices', in keyframe order.
        key_indices = metadata.get('key_indices', [])
        for i, original_frame_num in enumerate(key_indices):
            mapping[i] = original_frame_num
    else:
        # No metadata: infer the frame number from names like
        # "keyframe_<num>.png".
        frame_files = sorted(frames_dir.glob("keyframe_*.png"))
        for i, frame_file in enumerate(frame_files):
            try:
                frame_num = int(frame_file.stem.split('_')[1])
                mapping[i] = frame_num
            except (ValueError, IndexError):
                # Unparseable filename -- fall back to the positional index.
                # (Was a bare `except:`, which also swallowed SystemExit and
                # KeyboardInterrupt.)
                mapping[i] = i

    return mapping


def generate_masks_for_video(video_id: str,
                            frames_dir: str,
                            df_volume: pd.DataFrame,
                            mask_generator: MaskGenerator,
                            output_mask_dir: Path,
                            output_annotation_dir: Path,
                            mask_type: str = 'binary') -> Dict:
    """
    Generate masks and annotation-point files for one video.

    Args:
        video_id: Video identifier.
        frames_dir: Directory containing the extracted keyframes.
        df_volume: VolumeTracings dataframe holding raw contour points.
        mask_generator: Shared mask generator; its image size is updated in
            place to match this video's frames.
        output_mask_dir: Root output directory for mask PNGs.
        output_annotation_dir: Root output directory for annotation JSON.
        mask_type: Mask type forwarded to the generator.

    Returns:
        Statistics dict; ``{'success': False, 'reason': ...}`` when the
        video could not be processed at all.
    """
    frames_dir_path = Path(frames_dir)
    if not frames_dir_path.exists():
        logger.warning(f"关键帧目录不存在: {frames_dir}")
        return {'success': False, 'reason': 'frames_dir_not_found'}

    # Frames that actually carry VolumeTracings annotations.
    annotation_frames = get_annotation_frames(df_volume, video_id)
    if not annotation_frames:
        logger.warning(f"视频 {video_id} 没有标注帧")
        return {'success': False, 'reason': 'no_annotation_frames'}

    frame_mapping = get_frame_mapping(frames_dir_path)

    # Invert the mapping once (original frame -> keyframe index) so the loop
    # below does O(1) lookups instead of a linear scan per annotated frame.
    # setdefault keeps the FIRST keyframe for a duplicated original frame,
    # matching the previous first-match scan order.
    frame_to_keyframe: Dict[int, int] = {}
    for kf_idx, orig_frame in frame_mapping.items():
        frame_to_keyframe.setdefault(orig_frame, kf_idx)

    # Read the first keyframe only to discover the image dimensions.
    first_frame_file = sorted(frames_dir_path.glob("keyframe_*.png"))
    if not first_frame_file:
        logger.warning(f"视频 {video_id} 没有关键帧文件")
        return {'success': False, 'reason': 'no_keyframes'}

    first_frame = cv2.imread(str(first_frame_file[0]))
    if first_frame is None:
        logger.warning(f"无法读取第一帧: {first_frame_file[0]}")
        return {'success': False, 'reason': 'cannot_read_frame'}

    height, width = first_frame.shape[:2]

    # Retarget the shared mask generator to this video's frame size.
    mask_generator.image_shape = (height, width)
    mask_generator.height = height
    mask_generator.width = width

    video_mask_dir = output_mask_dir / video_id
    video_annotation_dir = output_annotation_dir / video_id
    video_mask_dir.mkdir(parents=True, exist_ok=True)
    video_annotation_dir.mkdir(parents=True, exist_ok=True)

    generated_masks = 0
    generated_annotations = 0
    matched_frames = []

    for original_frame_num in annotation_frames:
        # Exact match first; otherwise accept the closest keyframe when it
        # lies within 2 frames of the annotated one.
        keyframe_index = frame_to_keyframe.get(original_frame_num)

        if keyframe_index is None and frame_mapping:
            closest_idx = min(frame_mapping.keys(),
                             key=lambda k: abs(frame_mapping[k] - original_frame_num))
            if abs(frame_mapping[closest_idx] - original_frame_num) <= 2:
                keyframe_index = closest_idx
                logger.info(f"视频 {video_id} 帧 {original_frame_num} 映射到关键帧 {keyframe_index} (原始帧 {frame_mapping[closest_idx]})")

        if keyframe_index is None:
            logger.warning(f"视频 {video_id} 帧 {original_frame_num} 没有对应的关键帧")
            continue

        # Endocardium ('endo') / epicardium ('epi') contour points.
        contours = parse_endo_epi_contours(df_volume, video_id, original_frame_num)

        if contours['endo'] is None and contours['epi'] is None:
            logger.warning(f"视频 {video_id} 帧 {original_frame_num} 没有轮廓点")
            continue

        try:
            mask = mask_generator.generate_lv_mask(
                contours['endo'],
                contours['epi'],
                mask_type
            )

            mask_file = video_mask_dir / f"frame_{original_frame_num:03d}.png"
            cv2.imwrite(str(mask_file), mask)
            generated_masks += 1

            # Persist the raw contour points next to the mask so downstream
            # consumers do not need VolumeTracings.csv again.
            annotation_data = {
                'video_id': video_id,
                'frame_num': original_frame_num,
                'keyframe_index': int(keyframe_index) if keyframe_index is not None else None,
                'endo_points': contours['endo'].tolist() if contours['endo'] is not None else None,
                'epi_points': contours['epi'].tolist() if contours['epi'] is not None else None,
                'image_shape': [height, width],
                'mask_type': mask_type
            }

            annotation_file = video_annotation_dir / f"frame_{original_frame_num:03d}.json"
            with open(annotation_file, 'w', encoding='utf-8') as f:
                json.dump(annotation_data, f, indent=2, ensure_ascii=False)
            generated_annotations += 1

            matched_frames.append({
                'original_frame': original_frame_num,
                'keyframe_index': keyframe_index,
                'has_endo': contours['endo'] is not None,
                'has_epi': contours['epi'] is not None
            })

        except Exception as e:
            # One bad frame must not abort the whole video.
            logger.error(f"生成掩码失败 {video_id} 帧 {original_frame_num}: {e}")
            continue

    return {
        'success': True,
        'video_id': video_id,
        'generated_masks': generated_masks,
        'generated_annotations': generated_annotations,
        'annotation_frames': annotation_frames,
        'matched_frames': matched_frames
    }


def generate_all_labels(data_dir: str,
                       volume_csv: str,
                       filelist_csv: Optional[str] = None,
                       output_dir: str = "data/processed/segmentation",
                       mask_type: str = 'binary',
                       max_videos: Optional[int] = None) -> Dict:
    """
    Generate masks and annotation points for every preprocessed video.

    Args:
        data_dir: Directory holding preprocessing_results.json.
        volume_csv: Path to VolumeTracings.csv.
        filelist_csv: Optional path to FileList.csv.
        output_dir: Output root directory.
        mask_type: Mask type forwarded to the mask generator.
        max_videos: Upper bound on videos to process (for quick tests);
            None means no limit.

    Returns:
        Statistics dict (also written to label_generation_stats.json);
        empty dict when the preprocessing results cannot be loaded.
    """
    logger.info("开始生成EchoNet-Dynamic标签...")

    results_file = Path(data_dir) / "preprocessing_results.json"
    results = load_preprocessing_results(str(results_file))

    if not results:
        logger.error("无法加载预处理结果")
        return {}

    logger.info(f"加载VolumeTracings.csv: {volume_csv}")
    df_volume = load_echonet_labels(volume_csv)

    # NOTE(review): df_filelist is loaded but never consumed below; kept for
    # its logging side effect / future use -- confirm whether it is needed.
    df_filelist = None
    if filelist_csv and Path(filelist_csv).exists():
        logger.info(f"加载FileList.csv: {filelist_csv}")
        df_filelist = load_filelist_metadata(filelist_csv)

    echonet_samples = results.get('echonet_dynamic', {}).get('samples', [])

    # Explicit None check: the previous truthiness test silently ignored
    # max_videos=0, which should mean "process nothing".
    if max_videos is not None:
        echonet_samples = echonet_samples[:max_videos]
        logger.info(f"限制处理数量: {max_videos} 个视频")

    logger.info(f"找到 {len(echonet_samples)} 个EchoNet-Dynamic样本")

    output_path = Path(output_dir)
    mask_dir = output_path / "masks"
    annotation_dir = output_path / "annotations"
    mask_dir.mkdir(parents=True, exist_ok=True)
    annotation_dir.mkdir(parents=True, exist_ok=True)

    # One shared generator; its image size is updated per video by
    # generate_masks_for_video.
    mask_generator = MaskGenerator(image_shape=(112, 112))

    stats = {
        'total_videos': len(echonet_samples),
        'successful': 0,
        'failed': 0,
        'total_masks': 0,
        'total_annotations': 0,
        'errors': []
    }

    for sample in tqdm(echonet_samples, desc="生成标签"):
        video_id = sample['id']
        frames_dir = sample.get('frames_dir')

        if not frames_dir:
            logger.warning(f"视频 {video_id} 没有frames_dir")
            stats['failed'] += 1
            continue

        try:
            result = generate_masks_for_video(
                video_id=video_id,
                frames_dir=frames_dir,
                df_volume=df_volume,
                mask_generator=mask_generator,
                output_mask_dir=mask_dir,
                output_annotation_dir=annotation_dir,
                mask_type=mask_type
            )

            if result['success']:
                stats['successful'] += 1
                stats['total_masks'] += result['generated_masks']
                stats['total_annotations'] += result['generated_annotations']
            else:
                stats['failed'] += 1
                stats['errors'].append({
                    'video_id': video_id,
                    'reason': result.get('reason', 'unknown')
                })

        except Exception as e:
            # A single failing video must not abort the whole run; record
            # the reason and continue.
            logger.error(f"处理视频 {video_id} 时出错: {e}")
            stats['failed'] += 1
            stats['errors'].append({
                'video_id': video_id,
                'reason': str(e)
            })

    stats_file = output_path / "label_generation_stats.json"
    with open(stats_file, 'w', encoding='utf-8') as f:
        json.dump(stats, f, indent=2, ensure_ascii=False)

    logger.info("=" * 60)
    logger.info("标签生成完成!")
    logger.info(f"成功: {stats['successful']}/{stats['total_videos']}")
    logger.info(f"失败: {stats['failed']}/{stats['total_videos']}")
    logger.info(f"生成掩码: {stats['total_masks']} 个")
    logger.info(f"生成标注: {stats['total_annotations']} 个")
    logger.info(f"统计信息已保存到: {stats_file}")
    logger.info("=" * 60)

    return stats


def main():
    """Command-line entry point: parse arguments and run label generation."""
    import argparse

    parser = argparse.ArgumentParser(description='生成EchoNet-Dynamic标签')

    # (flag, add_argument kwargs) table keeps the CLI definition compact;
    # destinations deliberately mirror generate_all_labels' parameters.
    options = [
        ('--data_dir', dict(type=str,
                            default='data/processed/segmentation',
                            help='预处理数据目录')),
        ('--volume_csv', dict(type=str,
                              default='D:/Data/EchoNet-Dynamic/VolumeTracings.csv',
                              help='VolumeTracings.csv路径')),
        ('--filelist_csv', dict(type=str,
                                default='D:/Data/EchoNet-Dynamic/FileList.csv',
                                help='FileList.csv路径（可选）')),
        ('--output_dir', dict(type=str,
                              default='data/processed/segmentation',
                              help='输出目录')),
        ('--mask_type', dict(type=str,
                             choices=['binary', 'multi_class'],
                             default='binary',
                             help='掩码类型')),
        ('--max_videos', dict(type=int,
                              default=None,
                              help='最大处理视频数（用于测试）')),
    ]
    for flag, kwargs in options:
        parser.add_argument(flag, **kwargs)

    opts = parser.parse_args()
    # Namespace attributes map 1:1 onto generate_all_labels' keyword args.
    generate_all_labels(**vars(opts))


if __name__ == "__main__":
    main()

