import os
import random
import shutil
import json
from pathlib import Path

# Path to the source dataset root
DATASET_PATH = "source"
# Output path — the current directory is used directly
OUTPUT_PATH = "."
# Fraction of images assigned to the training split (rest go to validation)
TRAIN_RATIO = 0.85

def collect_dataset_info(dataset_path):
    """Walk *dataset_path* and gather every annotated image.

    A class name is derived from each image's directory path relative to
    the dataset root (e.g. "app/home/like").  Images without a matching
    .txt annotation file are skipped.

    Args:
        dataset_path: root directory of the source dataset.

    Returns:
        (all_images, all_classes): a list of dicts with keys
        'img_path', 'txt_path', 'class_name', 'rel_dir', and a sorted
        list of unique class names.  Sorting makes the class IDs later
        derived from this list stable across runs (a plain ``list(set)``
        would give a different order — and different IDs — each run).
    """
    all_images = []
    all_classes = set()

    for root, dirs, files in os.walk(dataset_path):
        # Case-insensitive extension match so .JPG/.PNG etc. are not missed.
        image_files = [
            f for f in files
            if f.lower().endswith(('.jpg', '.jpeg', '.png'))
        ]
        if not image_files:
            continue

        # Skip images placed directly in the dataset root: a class name
        # needs at least one directory level.
        rel_path = os.path.relpath(root, dataset_path)
        if rel_path == '.':
            continue

        # Normalize Windows separators so class names always use '/'
        # (format like "app/home/like").
        class_name = rel_path.replace('\\', '/').strip()
        all_classes.add(class_name)

        # Collect each image together with its annotation file.
        for img_file in image_files:
            img_path = os.path.join(root, img_file)
            txt_file = os.path.join(root, os.path.splitext(img_file)[0] + '.txt')

            # Only keep images that actually have an annotation file.
            if os.path.exists(txt_file):
                all_images.append({
                    'img_path': img_path,
                    'txt_path': txt_file,
                    'class_name': class_name,
                    'rel_dir': rel_path
                })

    return all_images, sorted(all_classes)

def create_dataset_structure(output_path):
    """Build the train/val images+labels directory layout under *output_path*.

    Existing plain files inside the four leaf directories are deleted so a
    rerun starts from a clean slate; missing directories are created.

    Returns:
        Dict with keys 'train_img', 'train_label', 'val_img', 'val_label'
        mapping to the corresponding directory paths.
    """
    layout = {
        'train_img': os.path.join(output_path, "train", "images"),
        'train_label': os.path.join(output_path, "train", "labels"),
        'val_img': os.path.join(output_path, "val", "images"),
        'val_label': os.path.join(output_path, "val", "labels"),
    }

    for path in layout.values():
        if not os.path.exists(path):
            os.makedirs(path, exist_ok=True)
            continue
        # Directory already exists: remove its plain files, leave subdirs alone.
        for entry in os.listdir(path):
            entry_path = os.path.join(path, entry)
            if os.path.isfile(entry_path):
                os.remove(entry_path)

    return layout

def _copy_subset(items, img_dir, label_dir, class_dict):
    """Copy one split's images into *img_dir* and rewrite their YOLO labels
    into *label_dir* with the global class ID from *class_dict*."""
    for item in items:
        img_filename = os.path.basename(item['img_path'])
        txt_filename = os.path.basename(item['txt_path'])

        # Copy the image (copy2 preserves file metadata).
        shutil.copy2(item['img_path'], os.path.join(img_dir, img_filename))

        # Read the original annotation file.
        with open(item['txt_path'], 'r', encoding='utf-8') as f:
            lines = f.readlines()

        # Class ID is the same for every line of this file — look it up once.
        class_id = class_dict[item['class_name']]

        # Append mode: duplicate basenames from different source directories
        # merge their annotations instead of the last one overwriting.
        dst_txt = os.path.join(label_dir, txt_filename)
        with open(dst_txt, 'a', encoding='utf-8') as f:
            for line in lines:
                parts = line.split()
                # YOLO format: class_id x_center y_center width height
                if len(parts) >= 5:
                    f.write(f"{class_id} {' '.join(parts[1:])}\n")


def split_and_copy_dataset(all_images, directories, class_dict, train_ratio=0.85):
    """Randomly split *all_images* into train/val sets and copy the files.

    Args:
        all_images: list of dicts from ``collect_dataset_info`` (not mutated;
            a shuffled copy is used internally).
        directories: dict from ``create_dataset_structure``.
        class_dict: mapping from class name to numeric class ID.
        train_ratio: fraction of images assigned to the training split.

    Returns:
        (train_images, val_images): the two lists of item dicts.
    """
    # Shuffle a copy so the caller's list order is left untouched.
    shuffled = list(all_images)
    random.shuffle(shuffled)

    train_size = int(len(shuffled) * train_ratio)
    train_images = shuffled[:train_size]
    val_images = shuffled[train_size:]

    print(f"总图片数: {len(shuffled)}")
    print(f"训练集图片数: {len(train_images)}")
    print(f"验证集图片数: {len(val_images)}")

    # The train and val copy logic is identical — share one helper.
    _copy_subset(train_images, directories['train_img'],
                 directories['train_label'], class_dict)
    _copy_subset(val_images, directories['val_img'],
                 directories['val_label'], class_dict)

    return train_images, val_images

def _write_classes_file(path, classes):
    """Write one class name per line to *path* (classes.txt format)."""
    with open(path, 'w', encoding='utf-8') as f:
        for cls in classes:
            f.write(f"{cls}\n")


def create_yaml_file(output_path, classes):
    """Create the dataset.yaml config plus the classes.txt files.

    Args:
        output_path: dataset output root.
        classes: ordered list of class names; list position defines the ID.

    Returns:
        Dict mapping each class name to its numeric ID (same enumeration
        order as the YAML `names` section).
    """
    class_dict = {class_name: i for i, class_name in enumerate(classes)}

    # Write the YAML config by hand so no yaml dependency is needed.
    yaml_path = os.path.join(output_path, 'dataset.yaml')
    with open(yaml_path, 'w', encoding='utf-8') as f:
        f.write("path: " + os.path.abspath(output_path) + "\n")
        f.write("train: train/images\n")
        f.write("val: val/images\n")
        f.write("names:\n")
        for i, cls in enumerate(classes):
            # NOTE(review): a class name containing a single quote would break
            # this YAML line — assumed not to occur in directory names.
            f.write(f"  {i}: '{cls}'\n")

    print(f"YAML配置文件已创建: {yaml_path}")

    # Top-level classes.txt.
    classes_path = os.path.join(output_path, 'classes.txt')
    _write_classes_file(classes_path, classes)

    print(f"Classes.txt文件已创建: {classes_path}")

    # Mirror classes.txt into both label directories (presumably for
    # annotation tools that look for it next to the label files).
    train_labels_dir = os.path.join(output_path, 'train', 'labels')
    val_labels_dir = os.path.join(output_path, 'val', 'labels')
    os.makedirs(train_labels_dir, exist_ok=True)
    os.makedirs(val_labels_dir, exist_ok=True)
    _write_classes_file(os.path.join(train_labels_dir, 'classes.txt'), classes)
    _write_classes_file(os.path.join(val_labels_dir, 'classes.txt'), classes)

    print(f"Classes.txt文件已添加到训练集和验证集标签目录")

    return class_dict

def main():
    """Run the full pipeline: scan the source dataset, build the output
    layout, write the config files, then split and copy the dataset."""
    print("正在收集数据集信息...")
    all_images, all_classes = collect_dataset_info(DATASET_PATH)

    # Nothing usable found — bail out early.
    if not all_images:
        print("错误: 未找到有效的图片和标注文件.")
        return

    print("正在创建数据集目录结构...")
    directories = create_dataset_structure(OUTPUT_PATH)

    print("正在创建YAML配置文件...")
    class_dict = create_yaml_file(OUTPUT_PATH, all_classes)

    print("正在分割并复制数据集...")
    split_and_copy_dataset(all_images, directories, class_dict, TRAIN_RATIO)

    print("数据集划分完成!")

# Script entry point: run only when executed directly, not when imported.
if __name__ == "__main__":
    main() 