#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
数据集准备脚本
用于准备自定义车辆检测数据集
"""

import os
import shutil
import random
from pathlib import Path
from typing import List, Tuple
import argparse
from sklearn.model_selection import train_test_split

class DatasetPreparator:
    """Prepare a custom vehicle-detection dataset in YOLO layout.

    Workflow: stage images/labels into temporary holding directories
    (``temp_images`` / ``temp_labels``), split them into train/val/test
    under ``images/`` and ``labels/``, then emit a ``dataset.yaml``
    config consumable by YOLO trainers.
    """

    def __init__(self, dataset_root: str = "datasets/custom_vehicle"):
        """Initialize the preparator and create the directory skeleton.

        Args:
            dataset_root: Root directory under which the dataset is built.
        """
        self.dataset_root = Path(dataset_root)
        self.setup_directories()

    def setup_directories(self):
        """Create the YOLO directory structure (images/labels x train/val/test)."""
        directories = [
            "images/train",
            "images/val",
            "images/test",
            "labels/train",
            "labels/val",
            "labels/test",
        ]

        for dir_name in directories:
            (self.dataset_root / dir_name).mkdir(parents=True, exist_ok=True)

        print(f"数据集目录结构已创建: {self.dataset_root}")

    def add_images_from_directory(self, images_dir: str, labels_dir: str = None):
        """Stage images (and matching ``.txt`` labels) from a directory.

        Args:
            images_dir: Directory containing image files.
            labels_dir: Directory containing YOLO label files; defaults to
                ``images_dir`` (labels assumed to sit next to the images).
        """
        images_path = Path(images_dir)

        if not images_path.exists():
            print(f"图像目录不存在: {images_dir}")
            return

        # Supported image formats.
        image_extensions = {'.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.webp'}

        # Collect into a set: on case-insensitive filesystems the lower- and
        # upper-case globs match the same files twice, which previously
        # produced duplicate entries (and a wrong count).
        found = set()
        for ext in image_extensions:
            found.update(images_path.glob(f"*{ext}"))
            found.update(images_path.glob(f"*{ext.upper()}"))
        image_files = sorted(found)

        if not image_files:
            print(f"在目录 {images_dir} 中未找到图像文件")
            return

        print(f"找到 {len(image_files)} 个图像文件")

        # If no label directory was given, assume labels sit next to the images.
        if labels_dir is None:
            labels_dir = images_dir

        labels_path = Path(labels_dir)

        for img_file in image_files:
            # Stage the image in the temporary holding area.
            temp_img_path = self.dataset_root / "temp_images" / img_file.name
            temp_img_path.parent.mkdir(parents=True, exist_ok=True)
            shutil.copy2(img_file, temp_img_path)

            # Stage the matching label file if present; warn otherwise.
            label_file = labels_path / f"{img_file.stem}.txt"
            if label_file.exists():
                temp_label_path = self.dataset_root / "temp_labels" / f"{img_file.stem}.txt"
                temp_label_path.parent.mkdir(parents=True, exist_ok=True)
                shutil.copy2(label_file, temp_label_path)
            else:
                print(f"警告: 未找到标签文件 {label_file}")

    def add_annotated_images(self, image_files: List[str], label_files: List[str]):
        """Stage explicitly paired image and label files.

        Args:
            image_files: List of image file paths.
            label_files: List of corresponding label file paths (same length).
        """
        if len(image_files) != len(label_files):
            print("错误: 图像文件数量与标签文件数量不匹配")
            return

        for img_file, label_file in zip(image_files, label_files):
            # Pairs with a missing side are skipped silently (best-effort staging).
            if os.path.exists(img_file) and os.path.exists(label_file):
                img_name = Path(img_file).name
                label_name = Path(label_file).name

                temp_img_path = self.dataset_root / "temp_images" / img_name
                temp_label_path = self.dataset_root / "temp_labels" / label_name

                temp_img_path.parent.mkdir(parents=True, exist_ok=True)
                temp_label_path.parent.mkdir(parents=True, exist_ok=True)

                shutil.copy2(img_file, temp_img_path)
                shutil.copy2(label_file, temp_label_path)

    def create_yolo_dataset(self, train_ratio: float = 0.7, val_ratio: float = 0.2,
                          test_ratio: float = 0.1, random_seed: int = 42):
        """Split the staged files into a YOLO train/val/test dataset.

        Ratios that do not sum to 1.0 are normalized. The split is
        reproducible for a given ``random_seed``.

        Args:
            train_ratio: Fraction of images for the training set.
            val_ratio: Fraction of images for the validation set.
            test_ratio: Fraction of images for the test set.
            random_seed: Seed controlling the shuffle.

        Returns:
            True on success, False when no staged images were found.
        """
        temp_images_dir = self.dataset_root / "temp_images"
        temp_labels_dir = self.dataset_root / "temp_labels"

        if not temp_images_dir.exists() or not any(temp_images_dir.iterdir()):
            print("错误: 没有找到图像文件，请先添加图像")
            return False

        # Keep only files with a recognized image extension.
        valid_suffixes = {'.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.webp'}
        image_files = [f for f in temp_images_dir.glob("*")
                       if f.suffix.lower() in valid_suffixes]

        if not image_files:
            print("错误: 没有找到有效的图像文件")
            return False

        # Normalize ratios when they don't sum to 1.0.
        total_ratio = train_ratio + val_ratio + test_ratio
        if abs(total_ratio - 1.0) > 1e-6:
            print(f"警告: 比例总和为 {total_ratio}，不等于1.0，将自动调整")
            train_ratio /= total_ratio
            val_ratio /= total_ratio
            test_ratio /= total_ratio

        # Sort before shuffling: glob order is filesystem-dependent, so a
        # seeded shuffle alone would not make the split reproducible.
        image_files.sort()
        # Local RNG instance: identical permutation to random.seed()+shuffle,
        # but without mutating the global random state.
        rng = random.Random(random_seed)
        rng.shuffle(image_files)

        total_images = len(image_files)
        train_size = int(total_images * train_ratio)
        val_size = int(total_images * val_ratio)

        train_files = image_files[:train_size]
        val_files = image_files[train_size:train_size + val_size]
        # Test set takes the remainder so every image lands in exactly one split.
        test_files = image_files[train_size + val_size:]

        print("数据分割结果:")
        print(f"  训练集: {len(train_files)} 个文件 ({len(train_files)/total_images*100:.1f}%)")
        print(f"  验证集: {len(val_files)} 个文件 ({len(val_files)/total_images*100:.1f}%)")
        print(f"  测试集: {len(test_files)} 个文件 ({len(test_files)/total_images*100:.1f}%)")

        splits = {
            'train': train_files,
            'val': val_files,
            'test': test_files,
        }

        for split_name, files in splits.items():
            for img_file in files:
                # Copy the image into its split directory.
                dst_img = self.dataset_root / "images" / split_name / img_file.name
                shutil.copy2(img_file, dst_img)

                # Copy the label alongside it, if one was staged.
                label_file = temp_labels_dir / f"{img_file.stem}.txt"
                if label_file.exists():
                    dst_label = self.dataset_root / "labels" / split_name / f"{img_file.stem}.txt"
                    shutil.copy2(label_file, dst_label)

        # Remove the staging directories now that files are distributed.
        self.cleanup_temp()

        print("数据集创建完成！")
        return True

    def cleanup_temp(self):
        """Remove the temporary staging directories, if any."""
        temp_dirs = ["temp_images", "temp_labels"]
        for dir_name in temp_dirs:
            temp_dir = self.dataset_root / dir_name
            if temp_dir.exists():
                shutil.rmtree(temp_dir)
        print("临时文件已清理")

    def create_dataset_yaml(self, class_names: List[str] = None):
        """Write the ``dataset.yaml`` config, remapping class IDs if needed.

        Scans all label files to find the class IDs actually used; if they
        are not contiguous from 0, label files are rewritten with remapped
        IDs. The class-name list is padded or truncated to match.

        Args:
            class_names: Class name list; defaults to common vehicle classes.

        Returns:
            Path to the written YAML file.
        """
        # Local import kept deliberately: PyYAML is only needed here, so the
        # rest of the tool works without it installed.
        import yaml

        if class_names is None:
            # Default vehicle classes.
            class_names = ['car', 'truck', 'bus', 'motorcycle', 'bicycle']
        else:
            # Copy so the padding/truncation below never mutates the
            # caller's list.
            class_names = list(class_names)

        # Gather label files across all splits.
        all_labels = []
        for split in ['train', 'val', 'test']:
            split_labels = list((self.dataset_root / "labels" / split).glob("*.txt"))
            all_labels.extend(split_labels)

        if all_labels:
            # Determine which class IDs are actually present.
            classes = set()
            for label_file in all_labels:
                try:
                    with open(label_file, 'r', encoding='utf-8') as f:
                        for line in f:
                            if line.strip():
                                class_id = int(line.split()[0])
                                classes.add(class_id)
                # Narrowed from a bare except: skip only unreadable files
                # (OSError) or malformed class IDs (ValueError).
                except (OSError, ValueError):
                    continue

            # Remap IDs so they are contiguous starting at 0.
            class_ids = sorted(classes)
            if class_ids != list(range(len(class_ids))):
                print("检测到不连续的类别ID，将重新映射")
                id_mapping = {old_id: new_id for new_id, old_id in enumerate(class_ids)}

                for split in ['train', 'val', 'test']:
                    for label_file in (self.dataset_root / "labels" / split).glob("*.txt"):
                        with open(label_file, 'r', encoding='utf-8') as f:
                            lines = f.readlines()

                        with open(label_file, 'w', encoding='utf-8') as f:
                            for line in lines:
                                if line.strip():
                                    parts = line.split()
                                    parts[0] = str(id_mapping[int(parts[0])])
                                    f.write(' '.join(parts) + '\n')

            # Pad or truncate the name list to match the observed ID count.
            if len(class_ids) > len(class_names):
                class_names.extend([f'class_{i}' for i in range(len(class_names), len(class_ids))])
            elif len(class_ids) < len(class_names):
                class_names = class_names[:len(class_ids)]

        # Build the YAML config; fall back to the val split when no test
        # images directory exists.
        config = {
            'path': str(self.dataset_root.absolute()),
            'train': 'images/train',
            'val': 'images/val',
            'test': 'images/test' if (self.dataset_root / 'images' / 'test').exists() else 'images/val',
            'nc': len(class_names),
            'names': {i: name for i, name in enumerate(class_names)},
        }

        yaml_path = self.dataset_root / 'dataset.yaml'
        with open(yaml_path, 'w', encoding='utf-8') as f:
            yaml.dump(config, f, default_flow_style=False, allow_unicode=True)

        print(f"数据集配置文件已创建: {yaml_path}")
        print(f"类别配置: {config['names']}")

        return yaml_path

def main():
    """Command-line entry point: stage images, then optionally build the dataset."""
    arg_parser = argparse.ArgumentParser(description='数据集准备工具')
    arg_parser.add_argument('--dataset_path', type=str, default='datasets/custom_vehicle',
                            help='数据集根目录路径')
    arg_parser.add_argument('--add_images', type=str, nargs='+',
                            help='添加图像目录或文件路径')
    arg_parser.add_argument('--add_labels', type=str, nargs='+',
                            help='对应的标签目录或文件路径')
    arg_parser.add_argument('--train_ratio', type=float, default=0.7,
                            help='训练集比例')
    arg_parser.add_argument('--val_ratio', type=float, default=0.2,
                            help='验证集比例')
    arg_parser.add_argument('--test_ratio', type=float, default=0.1,
                            help='测试集比例')
    arg_parser.add_argument('--class_names', type=str, nargs='+',
                            default=['car', 'truck', 'bus', 'motorcycle', 'bicycle'],
                            help='类别名称列表')
    arg_parser.add_argument('--create', action='store_true',
                            help='创建数据集并生成配置文件')

    opts = arg_parser.parse_args()

    # Build the preparator (creates the directory skeleton as a side effect).
    prep = DatasetPreparator(opts.dataset_path)

    if opts.add_images:
        # Explicit image/label pairs only when both lists line up 1:1;
        # otherwise treat each argument as a directory to scan.
        paired = bool(opts.add_labels) and len(opts.add_labels) == len(opts.add_images)
        if paired:
            prep.add_annotated_images(opts.add_images, opts.add_labels)
        else:
            for directory in opts.add_images:
                prep.add_images_from_directory(directory)

    # Only emit the YAML config if the split actually succeeded.
    if opts.create and prep.create_yolo_dataset(opts.train_ratio, opts.val_ratio, opts.test_ratio):
        prep.create_dataset_yaml(opts.class_names)

    print("数据集准备完成！")

if __name__ == '__main__':
    # Run the CLI only when executed as a script, not when imported.
    main()