#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
FLUX训练数据集验证脚本
检查训练数据的完整性和质量
"""

import argparse
import json
import os
from datetime import datetime
from pathlib import Path

from PIL import Image

def validate_image_files(image_dir):
    """Validate every image file found under *image_dir* (recursively).

    An image is valid when it opens cleanly and both sides fall within
    [256, 4096] pixels.

    Returns:
        A pair ``(valid, invalid)`` where ``valid`` holds
        ``(path, width, height)`` tuples and ``invalid`` holds
        ``(path, reason)`` tuples.
    """
    allowed_suffixes = {'.jpg', '.jpeg', '.png', '.webp', '.bmp'}
    valid, invalid = [], []

    if not os.path.exists(image_dir):
        print(f"❌ 图像目录不存在: {image_dir}")
        return valid, invalid

    for candidate in Path(image_dir).rglob('*'):
        if candidate.suffix.lower() not in allowed_suffixes:
            continue
        try:
            # Opening the file is what verifies it is a readable image.
            with Image.open(candidate) as img:
                width, height = img.size
        except Exception as exc:
            invalid.append((candidate, f"图像损坏: {str(exc)}"))
            continue
        if width < 256 or height < 256:
            invalid.append((candidate, f"分辨率过小: {width}x{height}"))
        elif width > 4096 or height > 4096:
            invalid.append((candidate, f"分辨率过大: {width}x{height}"))
        else:
            valid.append((candidate, width, height))

    return valid, invalid

def validate_caption_files(image_dir, caption_ext='.txt'):
    """Check that each image under *image_dir* has a non-empty caption file.

    The caption is the sidecar file with the same stem and *caption_ext*
    as its suffix (default ``.txt``).

    Returns:
        ``(found, missing, empty)`` — ``found`` holds
        ``(caption_path, char_count)`` pairs; ``missing`` lists image
        paths with no caption file; ``empty`` lists caption paths whose
        content is blank, or ``(caption_path, reason)`` pairs when the
        file could not be read.
    """
    allowed_suffixes = {'.jpg', '.jpeg', '.png', '.webp', '.bmp'}
    found, missing, empty = [], [], []

    for image_path in Path(image_dir).rglob('*'):
        if image_path.suffix.lower() not in allowed_suffixes:
            continue
        caption_path = image_path.with_suffix(caption_ext)
        if not caption_path.exists():
            missing.append(image_path)
            continue
        try:
            text = caption_path.read_text(encoding='utf-8').strip()
        except Exception as exc:
            empty.append((caption_path, f"读取错误: {str(exc)}"))
            continue
        if text:
            found.append((caption_path, len(text)))
        else:
            empty.append(caption_path)

    return found, missing, empty

def analyze_caption_quality(caption_files):
    """Summarize caption length statistics.

    Args:
        caption_files: List of ``(path, char_count)`` pairs.

    Returns:
        A dict with total count, average length, and counts of short
        (<50), medium (50-200) and long (>200) captions; ``{}`` when
        *caption_files* is empty.
    """
    if not caption_files:
        return {}

    lengths = [count for _, count in caption_files]
    n_short = sum(1 for count in lengths if count < 50)
    n_long = sum(1 for count in lengths if count > 200)

    return {
        'total_captions': len(lengths),
        'avg_length': sum(lengths) / len(lengths),
        'short_captions': n_short,
        # Everything neither short nor long sits in the 50-200 band.
        'medium_captions': len(lengths) - n_short - n_long,
        'long_captions': n_long
    }

def generate_dataset_report(image_dir, output_file=None):
    """Validate the dataset under *image_dir* and print a summary report.

    Runs the image, caption, and caption-quality checks, prints a
    human-readable report to stdout, and — when *output_file* is given —
    writes a machine-readable JSON report as well.

    Args:
        image_dir: Directory containing the training images (searched
            recursively).
        output_file: Optional path for the JSON report file.
    """
    print("========================================")
    print("FLUX训练数据集验证报告")
    print("========================================")

    # Step 1: image integrity and resolution checks.
    print("\n1. 验证图像文件...")
    valid_images, invalid_images = validate_image_files(image_dir)

    print(f"✓ 有效图像: {len(valid_images)} 个")
    if valid_images:
        resolutions = [(w, h) for _, w, h in valid_images]
        min_res = min(resolutions)
        max_res = max(resolutions)
        print(f"   分辨率范围: {min_res[0]}x{min_res[1]} - {max_res[0]}x{max_res[1]}")

    if invalid_images:
        print(f"❌ 无效图像: {len(invalid_images)} 个")
        # Show at most 5 examples to keep the report readable.
        for img_path, reason in invalid_images[:5]:
            print(f"   {img_path.name}: {reason}")
        if len(invalid_images) > 5:
            print(f"   ... 还有 {len(invalid_images) - 5} 个无效图像")

    # Step 2: caption presence and content checks.
    print("\n2. 验证标题文件...")
    caption_files, missing_captions, empty_captions = validate_caption_files(image_dir)

    print(f"✓ 有效标题: {len(caption_files)} 个")
    if missing_captions:
        print(f"❌ 缺少标题: {len(missing_captions)} 个")
        for img_path in missing_captions[:5]:
            print(f"   {img_path.name}")
        if len(missing_captions) > 5:
            print(f"   ... 还有 {len(missing_captions) - 5} 个缺少标题")

    if empty_captions:
        print(f"⚠️  空标题文件: {len(empty_captions)} 个")
        for caption_path in empty_captions[:5]:
            # Read errors are stored as (path, reason) tuples; genuinely
            # blank files are stored as bare paths.
            if isinstance(caption_path, tuple):
                print(f"   {caption_path[0].name}: {caption_path[1]}")
            else:
                print(f"   {caption_path.name}")
        if len(empty_captions) > 5:
            print(f"   ... 还有 {len(empty_captions) - 5} 个空标题文件")

    # Step 3: caption length statistics.
    print("\n3. 分析标题质量...")
    quality_stats = analyze_caption_quality(caption_files)
    if quality_stats:
        print(f"标题总数: {quality_stats['total_captions']}")
        print(f"平均长度: {quality_stats['avg_length']:.1f} 字符")
        print(f"短标题 (<50字符): {quality_stats['short_captions']} 个")
        print(f"中等标题 (50-200字符): {quality_stats['medium_captions']} 个")
        print(f"长标题 (>200字符): {quality_stats['long_captions']} 个")

    # Step 4: actionable recommendations.
    print("\n4. 数据集建议...")
    total_images = len(valid_images) + len(invalid_images)
    total_captions = len(caption_files)

    if total_images == 0:
        print("❌ 没有找到任何图像文件")
        print("建议: 将训练图像放入 images/ 目录")
    elif total_captions == 0:
        print("❌ 没有找到任何标题文件")
        print("建议: 为每个图像创建对应的 .txt 标题文件")
    elif total_images != total_captions:
        print(f"⚠️  图像和标题数量不匹配 (图像: {total_images}, 标题: {total_captions})")
        print("建议: 确保每个图像都有对应的标题文件")
    else:
        print("✓ 数据集完整性良好")

    if len(valid_images) < 10:
        print("⚠️  训练图像数量较少 (< 10)")
        print("建议: 增加训练图像数量以提高模型效果")

    if quality_stats and quality_stats['short_captions'] > quality_stats['total_captions'] * 0.5:
        print("⚠️  短标题比例较高")
        print("建议: 提供更详细的图像描述")

    # Optional machine-readable report.
    if output_file:
        report = {
            'dataset_path': str(image_dir),
            # Fix: previously this stored str(Path().cwd()) — the working
            # directory — instead of an actual timestamp.
            'validation_time': datetime.now().isoformat(),
            'summary': {
                'total_images': total_images,
                'valid_images': len(valid_images),
                'invalid_images': len(invalid_images),
                'total_captions': total_captions,
                'missing_captions': len(missing_captions),
                'empty_captions': len(empty_captions)
            },
            'quality_stats': quality_stats,
            'issues': {
                'invalid_images': [{'file': str(f), 'reason': r} for f, r in invalid_images],
                'missing_captions': [str(f) for f in missing_captions],
                # Fix: (path, reason) read-error tuples used to serialize as
                # str(tuple); flatten to the path so the JSON stays uniform.
                'empty_captions': [str(f[0]) if isinstance(f, tuple) else str(f)
                                   for f in empty_captions]
            }
        }

        with open(output_file, 'w', encoding='utf-8') as f:
            json.dump(report, f, indent=2, ensure_ascii=False)
        print(f"\n✓ 报告已保存到: {output_file}")

    print("\n========================================")
    print("验证完成！")
    print("========================================")

def main():
    """CLI entry point: parse arguments and run the dataset report."""
    parser = argparse.ArgumentParser(description="FLUX训练数据集验证工具")
    parser.add_argument("--image-dir", default="training_data_all/images", help="图像目录路径")
    parser.add_argument("--output", help="输出报告文件路径")
    args = parser.parse_args()

    dataset_dir = Path(args.image_dir)
    if not dataset_dir.exists():
        # Bail out early with setup instructions rather than reporting on
        # a nonexistent directory.
        print(f"❌ 图像目录不存在: {dataset_dir}")
        print("请先运行 setup_training_environment.sh 创建目录结构")
        return

    generate_dataset_report(dataset_dir, args.output)

# Run the CLI only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
