import os
import random
import shutil
import hashlib
from pathlib import Path
from PIL import Image
import imagehash


def calculate_file_hash(filepath):
    """Return the MD5 hex digest of the file at *filepath*.

    The file is read in fixed-size chunks and fed to the hash
    incrementally, so large image files are never loaded into
    memory in one piece.
    """
    md5 = hashlib.md5()
    with open(filepath, 'rb') as f:
        # iter() with a sentinel keeps reading until read() returns b''
        for chunk in iter(lambda: f.read(65536), b''):
            md5.update(chunk)
    return md5.hexdigest()


def calculate_phash(image_path):
    """Return the perceptual hash of an image as a string.

    Perceptual hashing is suited to detecting visually similar
    (not just byte-identical) images.  Returns ``None`` if the
    file cannot be opened or hashed as an image.
    """
    try:
        with Image.open(image_path) as img:
            return str(imagehash.phash(img))
    except Exception:
        # Narrowed from a bare ``except:`` so that SystemExit and
        # KeyboardInterrupt are no longer silently swallowed.
        return None


def process_dataset(original_image_dir, original_label_dir, output_image_dir, output_label_dir,
                    num_samples, global_hashes, global_phashes):
    """Deduplicate one dataset split (train or val) and copy a random subset.

    Images without a matching label file are skipped.  An image is
    considered a duplicate if its MD5 file hash or its perceptual hash
    already appears in the global sets or among the images accepted so
    far in this split.

    Returns:
        selected_samples: list of (image_path, label_path, base_name) tuples chosen
        new_hashes: set of MD5 hashes added by this split
        new_phashes: set of perceptual hashes added by this split
    """
    # Make sure both destination directories exist.
    for out_dir in (output_image_dir, output_label_dir):
        Path(out_dir).mkdir(parents=True, exist_ok=True)

    # Collect candidate image filenames by extension.
    image_files = [name for name in os.listdir(original_image_dir)
                   if name.lower().endswith(('.jpg', '.jpeg', '.png'))]

    # Hashes accepted within this split, used for intra-split dedup.
    new_hashes = set()
    new_phashes = set()
    valid_samples = []

    print(f"正在处理 {original_image_dir}...")
    for name in image_files:
        src_img = os.path.join(original_image_dir, name)
        stem = os.path.splitext(name)[0]
        src_label = os.path.join(original_label_dir, stem + '.txt')

        # Only keep images that have a corresponding label file.
        if not os.path.exists(src_label):
            continue

        md5 = calculate_file_hash(src_img)
        perceptual = calculate_phash(src_img)

        # Duplicate if the exact bytes or the visual content was seen
        # before, either globally or earlier in this split.
        is_exact_dup = md5 in global_hashes or md5 in new_hashes
        is_visual_dup = bool(perceptual) and (
            perceptual in global_phashes or perceptual in new_phashes)
        if is_exact_dup or is_visual_dup:
            continue

        new_hashes.add(md5)
        if perceptual:
            new_phashes.add(perceptual)
        valid_samples.append((src_img, src_label, stem))

    print(f"发现 {len(valid_samples)} 个唯一图像(去重后)")

    # Clamp the request if fewer unique samples survived dedup.
    if len(valid_samples) < num_samples:
        print(f"警告: 只有 {len(valid_samples)} 个唯一样本可用，少于请求的 {num_samples} 个")
        num_samples = len(valid_samples)

    # Draw the subset uniformly at random without replacement.
    selected_samples = random.sample(valid_samples, num_samples)

    # Copy image + label pairs, preserving the original image extension.
    for src_img, src_label, stem in selected_samples:
        suffix = os.path.splitext(src_img)[1]
        shutil.copy2(src_img, os.path.join(output_image_dir, stem + suffix))
        shutil.copy2(src_label, os.path.join(output_label_dir, stem + '.txt'))

    print(f"成功复制 {len(selected_samples)} 个样本到 {output_image_dir}")
    return selected_samples, new_hashes, new_phashes


def select_datasets(source_root="yolo_data", output_root="yolo_data_simpler",
                    train_samples=4000, val_samples=1000):
    """Select deduplicated train and val subsets in one pass.

    The train split is processed first; its hashes are then added to
    the global sets so the val split cannot contain any image that is
    byte-identical or perceptually identical to a training image.

    Args:
        source_root: root directory holding ``images/{train,val}`` and
            ``labels/{train,val}`` (default matches the original layout).
        output_root: root directory for the reduced dataset.
        train_samples: number of training samples to keep.
        val_samples: number of validation samples to keep.
    """
    # Global hash sets ensure no duplication between train and val.
    global_hashes = set()
    global_phashes = set()

    # Train split parameters (paths derived from the configurable roots;
    # defaults reproduce the original hard-coded layout exactly).
    train_image_dir = f"{source_root}/images/train"
    train_label_dir = f"{source_root}/labels/train"
    train_output_image = f"{output_root}/images/train"
    train_output_label = f"{output_root}/labels/train"

    # Val split parameters.
    val_image_dir = f"{source_root}/images/val"
    val_label_dir = f"{source_root}/labels/val"
    val_output_image = f"{output_root}/images/val"
    val_output_label = f"{output_root}/labels/val"

    # Process the train split first.
    train_selected, train_hashes, train_phashes = process_dataset(
        train_image_dir, train_label_dir,
        train_output_image, train_output_label,
        train_samples, global_hashes, global_phashes
    )

    # Fold the train hashes into the global sets before touching val.
    global_hashes.update(train_hashes)
    global_phashes.update(train_phashes)

    # Then process the val split, deduplicating against train.
    val_selected, val_hashes, val_phashes = process_dataset(
        val_image_dir, val_label_dir,
        val_output_image, val_output_label,
        val_samples, global_hashes, global_phashes
    )

    # Final summary.
    print("\n处理完成:")
    print(f"- 训练集: {len(train_selected)} 个样本")
    print(f"- 验证集: {len(val_selected)} 个样本")
    print(f"- 总样本数: {len(train_selected) + len(val_selected)}")
    # Sanity check: val was deduped against the global (train) hashes,
    # so this intersection should always be 0.
    print(f"- 训练集和验证集之间的重复图像: {len(global_hashes & val_hashes)}")  # 应该为0


# Script entry point: build the reduced train/val dataset.
if __name__ == "__main__":
    select_datasets()