#!/usr/bin/env python3
"""
更新预处理元数据文件
扫描已处理的目录，更新preprocessing_results.json和数据划分文件
不需要重新处理视频
"""

import json
import random
from pathlib import Path
from typing import Dict, List, Any, Optional
import argparse

def load_metadata_from_dir(video_dir: Path) -> Optional[Dict[str, Any]]:
    """Load the metadata for one processed video directory.

    A directory is considered valid only when it contains both a
    ``metadata.json`` file and at least one extracted ``*.png`` frame.

    Args:
        video_dir: Directory produced by the preprocessing step.

    Returns:
        A dict with keys ``metadata`` (parsed JSON), ``num_frames``
        (count of PNG frames) and ``has_frames`` (always True), or
        ``None`` when the directory is incomplete or unreadable.
    """
    metadata_file = video_dir / "metadata.json"
    if not metadata_file.exists():
        return None

    try:
        with open(metadata_file, 'r', encoding='utf-8') as f:
            metadata = json.load(f)
    # Narrow catch: OSError covers I/O failures; ValueError covers
    # json.JSONDecodeError and UnicodeDecodeError. Unlike the previous
    # bare `except Exception`, programming errors now propagate.
    except (OSError, ValueError) as e:
        print(f"警告: 无法读取 {metadata_file}: {e}")
        return None

    # Metadata without any extracted frames is useless downstream.
    frame_files = list(video_dir.glob("*.png"))
    if not frame_files:
        return None

    return {
        'metadata': metadata,
        'num_frames': len(frame_files),
        'has_frames': True
    }

def scan_processed_echonet(processed_dir: Path, source_dir: Path) -> List[Dict[str, Any]]:
    """Walk a processed EchoNet-Dynamic tree and build the sample list.

    Every subdirectory with valid metadata and frames yields one sample
    record; the matching source video is located by probing the known
    container extensions in order.
    """
    results: List[Dict[str, Any]] = []

    if not processed_dir.exists():
        print(f"已处理目录不存在: {processed_dir}")
        return results

    candidate_dirs = [entry for entry in processed_dir.iterdir() if entry.is_dir()]
    print(f"扫描 {len(candidate_dirs)} 个已处理的视频目录...")

    for entry in candidate_dirs:
        vid = entry.name

        # Skip directories missing metadata or frames.
        info = load_metadata_from_dir(entry)
        if not info:
            continue

        meta = info['metadata']

        # Resolve the original video file: .avi first, then alternatives.
        source_file: Optional[Path] = None
        for ext in ['.avi', '.mp4', '.mov', '.mkv']:
            candidate = source_dir / f"{vid}{ext}"
            if candidate.exists():
                source_file = candidate
                break

        results.append({
            'id': vid,
            'type': 'echonet_dynamic',
            'video_path': str(source_file) if source_file else f"未知路径/{vid}",
            'frames_dir': str(entry),
            'num_frames': info['num_frames'],
            'original_frames': meta.get('original_frames', info['num_frames']),
            'quality_score': meta.get('quality_score', 0.0),
            'similar_groups': meta.get('similar_groups', []),
            'metadata': meta
        })

    print(f"成功扫描 {len(results)} 个有效样本")
    return results

def scan_processed_cardiacnet(processed_dir: Path) -> List[Dict[str, Any]]:
    """Scan a processed CardiacNet directory and build the sample list.

    Each subdirectory containing at least one PNG is treated as one
    sample. The mask is looked up first as a sibling ``<id>_mask.png``
    (derived from the directory name), then as ``mask.png`` inside the
    directory itself.

    Args:
        processed_dir: Root directory of the processed CardiacNet data.

    Returns:
        List of sample dicts with keys ``id``, ``type``, ``image_path``,
        ``mask_path`` and ``frames_dir``; empty when the root does not
        exist.
    """
    samples: List[Dict[str, Any]] = []

    if not processed_dir.exists():
        return samples

    # Keep only subdirectories that actually contain processed images.
    image_dirs = [
        item for item in processed_dir.iterdir()
        if item.is_dir() and any(item.glob("*.png"))
    ]

    print(f"扫描 {len(image_dirs)} 个已处理的CardiacNet图像目录...")

    for image_dir in image_dirs:
        image_id = image_dir.name

        # Preferred mask location: sibling "<id>_mask.png"; otherwise
        # fall back to a "mask.png" inside the image directory.
        mask_file = image_dir.parent / f"{image_id.replace('_image', '_mask')}.png"
        if not mask_file.exists():
            mask_file = image_dir / "mask.png"

        # Prefer the canonical "image.png"; otherwise take the first PNG
        # in sorted order. (Previously the glob was evaluated multiple
        # times in a nested ternary and the pick depended on filesystem
        # ordering.)
        canonical = image_dir / "image.png"
        if canonical.exists():
            image_path = str(canonical)
        else:
            pngs = sorted(image_dir.glob("*.png"))
            image_path = str(pngs[0]) if pngs else ""

        samples.append({
            'id': image_id,
            'type': 'cardiacnet',
            'image_path': image_path,
            'mask_path': str(mask_file) if mask_file.exists() else "",
            'frames_dir': str(image_dir)
        })

    print(f"成功扫描 {len(samples)} 个CardiacNet样本")
    return samples

def create_data_splits(all_samples: List[Dict[str, Any]],
                      train_ratio: float = 0.7,
                      val_ratio: float = 0.15,
                      test_ratio: float = 0.15,
                      random_seed: int = 42) -> Dict[str, List[Dict[str, Any]]]:
    """Split samples into train/val/test, stratified by sample type.

    Each type ('echonet_dynamic', 'cardiacnet', other) is shuffled and
    split independently so every split keeps roughly the same type mix.

    Args:
        all_samples: Samples to split; each dict may carry a 'type' key.
        train_ratio: Fraction assigned to the training split.
        val_ratio: Fraction assigned to the validation split.
        test_ratio: Nominal test fraction, used only for validation; the
            test split actually receives everything left after train and
            val so rounding never drops a sample.
        random_seed: Seed for reproducible shuffling.

    Returns:
        Dict with 'train', 'val' and 'test' sample lists.

    Raises:
        ValueError: If any ratio is negative or the ratios sum to more
            than 1 (previously such inputs were silently accepted).
    """
    if min(train_ratio, val_ratio, test_ratio) < 0:
        raise ValueError("划分比例不能为负数")
    if train_ratio + val_ratio + test_ratio > 1.0 + 1e-9:
        raise ValueError("划分比例之和不能超过1")

    random.seed(random_seed)

    # Group by dataset type for stratified splitting.
    echonet_samples = [s for s in all_samples if s.get('type') == 'echonet_dynamic']
    cardiacnet_samples = [s for s in all_samples if s.get('type') == 'cardiacnet']
    other_samples = [s for s in all_samples
                     if s.get('type') not in ['echonet_dynamic', 'cardiacnet']]

    splits: Dict[str, List[Dict[str, Any]]] = {'train': [], 'val': [], 'test': []}

    # Split each type independently.
    for sample_group in (echonet_samples, cardiacnet_samples, other_samples):
        if not sample_group:
            continue

        random.shuffle(sample_group)
        total = len(sample_group)

        train_end = int(total * train_ratio)
        val_end = train_end + int(total * val_ratio)

        splits['train'].extend(sample_group[:train_end])
        splits['val'].extend(sample_group[train_end:val_end])
        # Remainder goes to test so no sample is lost to rounding.
        splits['test'].extend(sample_group[val_end:])

    return splits

def update_metadata(output_dir: Path,
                   echonet_source_dir: Optional[Path] = None,
                   skip_cardiacnet: bool = False):
    """Rebuild the preprocessing metadata files from processed output.

    Scans the processed output tree and regenerates
    preprocessing_results.json plus the train/val/test split files.
    No video is re-processed.
    """
    output_dir = Path(output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)

    banner = "=" * 60
    print(banner)
    print("更新预处理元数据文件")
    print(banner)
    print()

    collected: List[Dict[str, Any]] = []

    # Step 1: EchoNet-Dynamic (only when a source directory was given).
    if echonet_source_dir:
        processed_echonet = output_dir / "echonet_dynamic"
        if processed_echonet.exists():
            print("扫描EchoNet-Dynamic数据集...")
            found = scan_processed_echonet(processed_echonet, Path(echonet_source_dir))
            collected.extend(found)
            print(f"  EchoNet-Dynamic: {len(found)} 个样本\n")

    # Step 2: CardiacNet (unless explicitly skipped).
    if not skip_cardiacnet:
        processed_cardiacnet = output_dir / "cardiacnet"
        if processed_cardiacnet.exists():
            print("扫描CardiacNet数据集...")
            found = scan_processed_cardiacnet(processed_cardiacnet)
            collected.extend(found)
            print(f"  CardiacNet: {len(found)} 个样本\n")

    if not collected:
        print("⚠️  未找到任何已处理的样本")
        return

    # Step 3: create the train/val/test split.
    print("创建数据划分...")
    splits = create_data_splits(collected)
    print(f"  训练集: {len(splits['train'])} 个样本")
    print(f"  验证集: {len(splits['val'])} 个样本")
    print(f"  测试集: {len(splits['test'])} 个样本")
    print()

    # Step 4: per-type sample lists for the results file.
    echonet_part = [s for s in collected if s.get('type') == 'echonet_dynamic']
    cardiacnet_part = [s for s in collected if s.get('type') == 'cardiacnet']

    # Step 5: write preprocessing_results.json.
    results: Dict[str, Any] = {}

    if echonet_part:
        results['echonet_dynamic'] = {
            'dataset': 'EchoNet-Dynamic',
            'total_samples': len(echonet_part),
            'valid_samples': len(echonet_part),
            'samples': echonet_part
        }

    if cardiacnet_part:
        results['cardiacnet'] = {
            'dataset': 'CardiacNet',
            'total_samples': len(cardiacnet_part),
            'valid_samples': len(cardiacnet_part),
            'samples': cardiacnet_part
        }

    results_file = output_dir / "preprocessing_results.json"
    with open(results_file, 'w', encoding='utf-8') as f:
        json.dump(results, f, indent=2, ensure_ascii=False)
    print(f"✅ 已更新预处理结果文件: {results_file}")

    # Step 6: write one JSON file per split.
    for split_name, split_samples in splits.items():
        split_file = output_dir / f"{split_name}_split.json"
        with open(split_file, 'w', encoding='utf-8') as f:
            json.dump({'samples': split_samples}, f, indent=2, ensure_ascii=False)
        print(f"✅ 已更新数据划分文件: {split_file}")

    print()
    print(banner)
    print("✅ 元数据更新完成!")
    print(banner)
    print(f"总样本数: {len(collected)}")
    print(f"  EchoNet-Dynamic: {len(echonet_part)} 个")
    print(f"  CardiacNet: {len(cardiacnet_part)} 个")

def main():
    """CLI entry point: parse arguments and refresh the metadata files."""
    cli = argparse.ArgumentParser(
        description="更新预处理元数据文件（不重新处理数据）")
    cli.add_argument(
        "--output_dir",
        type=str,
        default="data/processed/segmentation",
        help="输出目录")
    cli.add_argument(
        "--echonet_dir",
        type=str,
        default="D:/Data/EchoNet-Dynamic/Videos",
        help="EchoNet-Dynamic源数据目录")
    cli.add_argument(
        "--skip_cardiacnet",
        action="store_true",
        default=False,
        help="跳过CardiacNet数据集")
    opts = cli.parse_args()

    update_metadata(
        output_dir=opts.output_dir,
        echonet_source_dir=opts.echonet_dir,
        skip_cardiacnet=opts.skip_cardiacnet)

if __name__ == "__main__":
    main()

