#!/usr/bin/env python3
"""
加载EchoNet-Dynamic数据集的真实标签
"""

import pandas as pd
import numpy as np
import cv2
import torch
from pathlib import Path
from typing import Dict, List, Tuple, Optional
import json


def load_echonet_labels(csv_path: str) -> pd.DataFrame:
    """Load the EchoNet-Dynamic tracing labels from a CSV file.

    Args:
        csv_path: Path to the label CSV (e.g. VolumeTracings.csv).

    Returns:
        The label table as a pandas DataFrame.
    """
    return pd.read_csv(csv_path)


def parse_contour_points(df: pd.DataFrame, video_id: str, frame_num: int) -> Optional[np.ndarray]:
    """Extract the contour points annotated for one video frame.

    Args:
        df: Label table (VolumeTracings-style columns).
        video_id: Video ID (without the .avi extension).
        frame_num: Original frame number within the video.

    Returns:
        Contour points as an (N, 2) float32 array, or None when the
        frame has no annotations.
    """
    selection = df[(df['FileName'] == f"{video_id}.avi") & (df['Frame'] == frame_num)]

    if selection.empty:
        return None

    # Each row encodes one segment (X1,Y1)-(X2,Y2); flattening row-wise
    # yields the points interleaved in row order: [x1,y1], [x2,y2], ...
    coords = selection[['X1', 'Y1', 'X2', 'Y2']].to_numpy(dtype=np.float32)
    return coords.reshape(-1, 2)


def parse_endo_epi_contours(df: pd.DataFrame, video_id: str, frame_num: int) -> Dict[str, Optional[np.ndarray]]:
    """Split a frame's annotations into endocardial and epicardial contours.

    Heuristic: with one record it is assumed to be the endocardium; with
    two, the first is endo and the second epi; with more, the segment
    whose midpoint lies nearest the origin is taken as endo and the
    farthest as epi.

    Args:
        df: Label table.
        video_id: Video ID (without the .avi extension).
        frame_num: Original frame number.

    Returns:
        Dict with 'endo' and 'epi' keys; each value is a (2, 2) float32
        point array or None.
    """
    selection = df[(df['FileName'] == f"{video_id}.avi") & (df['Frame'] == frame_num)]

    if selection.empty:
        return {'endo': None, 'epi': None}

    # One (2, 2) segment per annotation row, in row order.
    segments = [
        np.array([[r['X1'], r['Y1']], [r['X2'], r['Y2']]], dtype=np.float32)
        for _, r in selection.iterrows()
    ]

    if len(segments) == 1:
        # A lone record is assumed to trace the endocardium.
        return {'endo': segments[0], 'epi': None}

    if len(segments) == 2:
        # Exactly two records: first endo, second epi by convention.
        return {'endo': segments[0], 'epi': segments[1]}

    # More than two records: rank segments by the distance of their
    # midpoint from the origin (stable sort preserves row order on ties);
    # nearest is treated as endocardium, farthest as epicardium.
    ranked = sorted(segments, key=lambda s: float(np.linalg.norm(s.mean(axis=0))))
    return {'endo': ranked[0], 'epi': ranked[-1]}


def get_annotation_frames(df: pd.DataFrame, video_id: str) -> List[int]:
    """Return the sorted list of annotated frame numbers for a video.

    Args:
        df: Label table.
        video_id: Video ID (without the .avi extension).

    Returns:
        Sorted frame numbers; empty list when the video has no labels.
    """
    frames = df.loc[df['FileName'] == f"{video_id}.avi", 'Frame']
    if frames.empty:
        return []
    return sorted(frames.unique().tolist())


def load_filelist_metadata(csv_path: str) -> pd.DataFrame:
    """Load per-video metadata from FileList.csv.

    Args:
        csv_path: Path to FileList.csv.

    Returns:
        The metadata table, or an empty DataFrame when the file is missing.
    """
    path = Path(csv_path)
    if not path.exists():
        return pd.DataFrame()
    return pd.read_csv(path)


def create_mask_from_contour(contour_points: np.ndarray, image_shape: Tuple[int, int]) -> np.ndarray:
    """Rasterize a closed contour into a binary mask.

    Args:
        contour_points: Contour vertices as an (N, 2) array of (x, y)
            coordinates; any array-like is accepted.
        image_shape: Target mask shape as (height, width).

    Returns:
        A uint8 mask of shape ``image_shape`` with the contour interior
        set to 255; all-zero when the contour is None or has fewer than
        3 points (not enough to form a polygon).
    """
    mask = np.zeros(image_shape, dtype=np.uint8)
    if contour_points is None or len(contour_points) < 3:
        return mask

    # Round to integer pixel coordinates and clamp into the image bounds
    # (x against width, y against height). astype() copies, so the
    # caller's array is never mutated.
    points = np.asarray(contour_points).astype(np.int32)
    height, width = image_shape
    points[:, 0] = np.clip(points[:, 0], 0, width - 1)
    points[:, 1] = np.clip(points[:, 1], 0, height - 1)

    # The >= 3 guard above already holds here, so fill unconditionally
    # (the original re-checked it redundantly).
    cv2.fillPoly(mask, [points], 255)
    return mask


def load_video_labels(video_id: str, csv_path: str, frames_dir: str) -> Dict[int, np.ndarray]:
    """Build a binary mask for every annotated frame of one video.

    Args:
        video_id: Video ID (without the .avi extension).
        csv_path: Path to the tracing-label CSV.
        frames_dir: Directory containing the extracted keyframe images.

    Returns:
        Mapping from original frame number to its uint8 mask.
    """
    labels = load_echonet_labels(csv_path)

    annotated = labels[labels['FileName'] == f"{video_id}.avi"]
    frame_numbers = sorted(annotated['Frame'].unique())

    # Derive the mask resolution from the first extracted keyframe;
    # fall back to 224x224 when the image is missing or unreadable.
    height, width = 224, 224
    first_frame_path = Path(frames_dir) / "keyframe_000.png"
    if first_frame_path.exists():
        image = cv2.imread(str(first_frame_path))
        if image is not None:
            height, width = image.shape[:2]

    masks: Dict[int, np.ndarray] = {}
    for frame_num in frame_numbers:
        contour = parse_contour_points(labels, video_id, frame_num)
        mask = create_mask_from_contour(contour, (height, width))
        masks[frame_num] = mask
        print(f"  帧 {frame_num}: 轮廓点数={len(contour) if contour is not None else 0}, 掩码非零像素={np.count_nonzero(mask)}")

    return masks


def _load_key_indices(frames_dir: str) -> Optional[List[int]]:
    """Read the keyframe→original-frame index mapping from metadata.json, if present."""
    metadata_path = Path(frames_dir) / "metadata.json"
    if not metadata_path.exists():
        return None
    with open(metadata_path, 'r', encoding='utf-8') as f:
        metadata = json.load(f)
    return metadata.get('key_indices', [])


def _match_keyframe_masks(frames_dir: str,
                          frame_masks: Dict[int, np.ndarray],
                          key_indices: Optional[List[int]],
                          available_frame_nums: List[int]) -> Tuple[List[np.ndarray], List[np.ndarray], int]:
    """Load keyframe images and pair each with its label mask.

    Frames with no exact label are matched to the closest labelled frame
    within ±5 frames; otherwise they get an all-zero mask.

    Returns:
        (keyframes, keyframe_masks, matched_count).
    """
    keyframes: List[np.ndarray] = []
    keyframe_masks: List[np.ndarray] = []
    matched_count = 0

    # Sort keyframe files by their numeric suffix.
    frame_files = sorted(Path(frames_dir).glob("keyframe_*.png"),
                         key=lambda x: int(x.stem.split('_')[1]))

    for i, frame_file in enumerate(frame_files):
        frame = cv2.imread(str(frame_file))
        if frame is None:
            continue
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame = cv2.resize(frame, (224, 224))
        keyframes.append(frame)

        # Map the extracted keyframe back to its original video frame
        # number — via metadata key_indices when available, otherwise
        # from the file name (backward compatibility).
        if key_indices and i < len(key_indices):
            original_frame_num = key_indices[i]
        else:
            original_frame_num = int(frame_file.stem.split('_')[1])

        if original_frame_num in frame_masks:
            mask = cv2.resize(frame_masks[original_frame_num], (224, 224))
            keyframe_masks.append(mask)
            matched_count += 1
            if matched_count <= 3:  # only detail the first 3 matches
                print(f"    ✓ 关键帧 {i} (原始帧{original_frame_num}): 匹配到标签，掩码非零像素={np.count_nonzero(mask)}")
        elif available_frame_nums:
            closest_frame = min(available_frame_nums,
                                key=lambda x: abs(x - original_frame_num))
            # Accept the nearest labelled frame only within a ±5-frame window.
            if abs(closest_frame - original_frame_num) <= 5:
                mask = cv2.resize(frame_masks[closest_frame], (224, 224))
                keyframe_masks.append(mask)
                matched_count += 1
                if matched_count <= 3:
                    print(f"    ≈ 关键帧 {i} (原始帧{original_frame_num}): 使用最接近的帧{closest_frame}，掩码非零像素={np.count_nonzero(mask)}")
            else:
                keyframe_masks.append(np.zeros((224, 224), dtype=np.uint8))
        else:
            keyframe_masks.append(np.zeros((224, 224), dtype=np.uint8))

    return keyframes, keyframe_masks, matched_count


def load_test_data_with_labels(data_dir: str, num_samples: int = 10,
                               csv_path: str = "D:/Data/EchoNet-Dynamic/VolumeTracings.csv") -> List[Dict]:
    """Load test samples together with their ground-truth masks.

    Args:
        data_dir: Directory holding preprocessing_results.json.
        num_samples: Maximum number of samples to load.
        csv_path: Path to the tracing-label CSV (previously hard-coded;
            the default preserves the original behavior).

    Returns:
        List of dicts with 'video_id', 'frames', 'masks', 'num_frames'.
    """
    data_path = Path(data_dir)
    results_file = data_path / "preprocessing_results.json"

    if not results_file.exists():
        print(f"预处理结果文件不存在: {results_file}")
        return []

    with open(results_file, 'r', encoding='utf-8') as f:
        results = json.load(f)

    test_data = []
    echonet_data = results.get('echonet_dynamic', {}).get('samples', [])

    # NOTE: the original enumerate index here was shadowed by the inner
    # per-frame loop and never used — dropped.
    for sample in echonet_data[:num_samples]:
        video_id = sample['id']
        frames_dir = sample['frames_dir']

        try:
            frame_masks = load_video_labels(video_id, csv_path, frames_dir)
            key_indices = _load_key_indices(frames_dir)

            available_frame_nums = sorted(frame_masks.keys())
            print(f"  可用标签帧号: {available_frame_nums}")
            if key_indices:
                print(f"  关键帧索引映射: {key_indices[:10]}... (共{len(key_indices)}个)")

            keyframes, keyframe_masks, matched_count = _match_keyframe_masks(
                frames_dir, frame_masks, key_indices, available_frame_nums)

            print(f"  匹配的掩码数: {matched_count}/{len(keyframes)}")

            if keyframes and keyframe_masks:
                test_data.append({
                    'video_id': video_id,
                    'frames': keyframes,
                    'masks': keyframe_masks,
                    'num_frames': len(keyframes)
                })

        except Exception as e:
            print(f"加载视频 {video_id} 的标签时出错: {e}")
            continue

    print(f"成功加载 {len(test_data)} 个带标签的测试样本")
    return test_data


def test_label_loading():
    """Smoke-test the EchoNet-Dynamic label-loading pipeline end to end.

    Loads the tracing CSV, parses the contour of one known video/frame,
    rasterizes it into a mask, then loads a few labelled test samples and
    prints diagnostics at every step.

    NOTE(review): depends on data living at D:/Data/EchoNet-Dynamic and
    data/processed/segmentation — confirm these paths on the target machine.
    """
    print("测试EchoNet-Dynamic标签加载...")
    
    # Load the full tracing-label table and report its size.
    csv_path = "D:/Data/EchoNet-Dynamic/VolumeTracings.csv"
    df = load_echonet_labels(csv_path)
    print(f"标签数据形状: {df.shape}")
    print(f"视频数量: {df['FileName'].nunique()}")
    print(f"总帧数: {len(df)}")
    
    # Parse contour points for one known video/frame pair.
    video_id = "0X100009310A3BD7FC"
    frame_num = 46
    contour_points = parse_contour_points(df, video_id, frame_num)
    print(f"视频 {video_id} 帧 {frame_num} 的轮廓点数: {len(contour_points) if contour_points is not None else 0}")
    
    # Rasterize the contour into a 224x224 mask.
    if contour_points is not None:
        print(f"轮廓点范围: X=[{contour_points[:, 0].min():.1f}, {contour_points[:, 0].max():.1f}], Y=[{contour_points[:, 1].min():.1f}, {contour_points[:, 1].max():.1f}]")
        mask = create_mask_from_contour(contour_points, (224, 224))
        print(f"掩码形状: {mask.shape}, 非零像素数: {np.count_nonzero(mask)}")
        
        # Debug: check whether the contour points fall inside the image bounds.
        points = contour_points.astype(np.int32)
        points[:, 0] = np.clip(points[:, 0], 0, 223)  # x coordinates, clamped to width
        points[:, 1] = np.clip(points[:, 1], 0, 223)  # y coordinates, clamped to height
        print(f"裁剪后轮廓点范围: X=[{points[:, 0].min()}, {points[:, 0].max()}], Y=[{points[:, 1].min()}, {points[:, 1].max()}]")
        
        # Manually rebuild the mask to cross-check create_mask_from_contour.
        test_mask = np.zeros((224, 224), dtype=np.uint8)
        if len(points) >= 3:
            cv2.fillPoly(test_mask, [points], 255)
            print(f"手动创建掩码非零像素数: {np.count_nonzero(test_mask)}")
    
    # Load a handful of labelled test samples from the processed data directory.
    test_data = load_test_data_with_labels("data/processed/segmentation", num_samples=5)
    print(f"加载的测试样本数: {len(test_data)}")
    
    if test_data:
        sample = test_data[0]
        print(f"样本视频ID: {sample['video_id']}")
        print(f"帧数: {sample['num_frames']}")
        print(f"掩码数: {len(sample['masks'])}")
        
        # Report mask quality as the fraction of non-zero pixels.
        total_pixels = 0
        non_zero_pixels = 0
        for i, mask in enumerate(sample['masks']):
            total_pixels += mask.size
            non_zero = np.count_nonzero(mask)
            non_zero_pixels += non_zero
            if i < 3:  # only detail the first 3 masks
                print(f"  掩码 {i}: 非零像素数 = {non_zero}, 覆盖率 = {non_zero / mask.size * 100:.2f}%")
        
        print(f"总掩码覆盖率: {non_zero_pixels / total_pixels * 100:.2f}%")


if __name__ == "__main__":
    test_label_loading()
