import os
import shutil
import random
from pathlib import Path

def split_dataset(source_dir, target_dir, train_ratio=0.7, val_ratio=0.2, test_ratio=0.1, seed=42):
    """
    Reorganize a dataset directory and split it into train/val/test subsets.

    Source layout:
        A/
            JPEGImages/   images (.jpg / .jpeg / .png)
            txt/          label files named <image-stem>.txt

    Target layout produced under ``target_dir``:
        B/
            train/  val/  test/
                JPEGImages/
                labels/

    Also writes ``train.txt``, ``val.txt`` and ``test.txt`` under
    ``target_dir``, each listing the absolute paths of that split's images.

    Args:
        source_dir: path of the source folder (must contain JPEGImages/ and txt/).
        target_dir: path of the destination folder; created if missing.
        train_ratio: fraction of images for the training split.
        val_ratio: fraction of images for the validation split.
        test_ratio: fraction of images for the test split.
        seed: random seed so the shuffle (and hence the split) is reproducible.

    Raises:
        ValueError: if the three ratios do not sum to 1.
        FileNotFoundError: if the expected source subdirectories are missing.
    """
    # Validate with a real exception: `assert` is stripped under `python -O`.
    if abs(train_ratio + val_ratio + test_ratio - 1.0) >= 1e-10:
        raise ValueError("划分比例之和必须为1")

    # Seed the module-level RNG so repeated runs produce the same split.
    random.seed(seed)

    source_dir = Path(source_dir)
    target_dir = Path(target_dir)
    # exist_ok avoids a race between an exists() check and the mkdir call.
    target_dir.mkdir(parents=True, exist_ok=True)

    image_dir = source_dir / 'JPEGImages'
    txt_dir = source_dir / 'txt'
    if not image_dir.exists() or not txt_dir.exists():
        raise FileNotFoundError(f"源目录结构不正确，请确保{image_dir}和{txt_dir}存在")

    # Collect image stems (filename without extension) for supported formats.
    image_files = [f.stem for f in image_dir.iterdir()
                   if f.is_file() and f.suffix.lower() in ('.jpg', '.jpeg', '.png')]

    # Shuffle once, then slice contiguously into the three splits.
    random.shuffle(image_files)
    total_count = len(image_files)
    train_count = int(total_count * train_ratio)
    val_count = int(total_count * val_ratio)

    datasets = {
        'train': image_files[:train_count],
        'val': image_files[train_count:train_count + val_count],
        'test': image_files[train_count + val_count:],  # remainder absorbs rounding
    }

    # Create the full target directory tree up front.
    for split in datasets:
        (target_dir / split / 'JPEGImages').mkdir(parents=True, exist_ok=True)
        (target_dir / split / 'labels').mkdir(parents=True, exist_ok=True)

    for split, stems in datasets.items():
        # Context manager guarantees the listing file is closed even on error
        # (the original kept three handles open across the whole copy loop).
        with open(target_dir / f"{split}.txt", 'w') as listing:
            for stem in stems:
                # BUG FIX: the original built the literal name "(unknown)"
                # instead of interpolating the current stem, so the exists()
                # checks always failed and no files were ever copied.
                for img_ext in ('.jpg', '.jpeg', '.png'):
                    img_file = image_dir / f"{stem}{img_ext}"
                    if img_file.exists():
                        dest_img = target_dir / split / 'JPEGImages' / img_file.name
                        shutil.copy2(img_file, dest_img)
                        # Record the copied image's absolute path in the listing.
                        listing.write(f"{dest_img.absolute()}\n")
                        break

                # Copy the matching label file, if one exists for this stem.
                label_file = txt_dir / f"{stem}.txt"
                if label_file.exists():
                    shutil.copy2(label_file, target_dir / split / 'labels' / label_file.name)

    # Report the resulting split sizes.
    print(f"数据集划分完成:")
    print(f"  训练集: {len(datasets['train'])} 个文件 ({train_ratio*100:.1f}%)")
    print(f"  验证集: {len(datasets['val'])} 个文件 ({val_ratio*100:.1f}%)")
    print(f"  测试集: {len(datasets['test'])} 个文件 ({test_ratio*100:.1f}%)")
    print(f"已在{target_dir}目录下生成train.txt、val.txt和test.txt文件")

if __name__ == "__main__":
    # Source dataset location and destination for the reorganized splits.
    src_path = r"/home/shuai/Downloads/total_202502271715"
    dst_path = r"/home/shuai/darknet/data/mydataset"

    # Split fractions for train / val / test — must sum to 1.
    split_ratios = (0.7, 0.2, 0.1)

    # Run the split with the configured ratios.
    split_dataset(src_path, dst_path, *split_ratios)