import os
import shutil
from collections import defaultdict
from PIL import Image
import hashlib


# Dataset folder paths: parallel directories of .jpg images and their
# per-image .txt annotation files (one label line per object).
images_folder = r'D:\上过的课\2024-2025-1-大四上\毕设\datasets\sum\images'
labels_folder = r'D:\上过的课\2024-2025-1-大四上\毕设\datasets\sum\labels'


# Task 1: remove images that have no labels at all, together with their label files.
def remove_empty_images_and_labels(log_file, images_dir=None, labels_dir=None):
    """Delete every image whose label file is empty, plus the label file itself.

    Args:
        log_file: writable text stream that receives one line per removed pair.
        images_dir: folder holding the .jpg images; defaults to the
            module-level ``images_folder``.
        labels_dir: folder holding the .txt label files; defaults to the
            module-level ``labels_folder``.
    """
    images_dir = images_folder if images_dir is None else images_dir
    labels_dir = labels_folder if labels_dir is None else labels_dir
    for filename in os.listdir(labels_dir):
        if not filename.endswith('.txt'):
            continue
        txt_file_path = os.path.join(labels_dir, filename)
        # A 0-byte label file means the image carries no annotations.
        if os.path.getsize(txt_file_path) != 0:
            continue
        # Derive the matching image filename from the label's base name.
        image_name = os.path.splitext(filename)[0] + '.jpg'
        image_path = os.path.join(images_dir, image_name)
        # Only delete the pair when the image actually exists; orphan empty
        # label files are left untouched (matches the original behavior).
        if os.path.exists(image_path):
            os.remove(image_path)
            os.remove(txt_file_path)
            # Bug fix: the message previously printed the literal "(unknown)"
            # instead of naming the label file that was removed.
            log_message = f"Removed {image_name} and {filename} due to empty label."
            print(log_message)
            log_file.write(log_message + '\n')


# Compute a content hash for an image.
def calculate_image_hash(image_path):
    """Return the SHA-256 hex digest of the image, normalized to a 32x32 RGB
    thumbnail so that byte-identical pixels (not file metadata) drive equality."""
    with Image.open(image_path) as img:
        normalized = img.convert('RGB').resize((32, 32), Image.LANCZOS)
        return hashlib.sha256(normalized.tobytes()).hexdigest()


# Task 2: remove duplicate images and their label files (keep the one with the most labels).
def remove_duplicate_images_and_labels(log_file):
    """Delete images with identical pixel content, keeping only the copy whose
    label file has the most lines; log every group and removal to *log_file*."""
    images_by_hash = defaultdict(list)
    # Group every .jpg in the images folder by its content hash.
    for entry in os.listdir(images_folder):
        if not entry.endswith('.jpg'):
            continue
        digest = calculate_image_hash(os.path.join(images_folder, entry))
        images_by_hash[digest].append(entry)

    # Resolve each group of look-alike images.
    for group in images_by_hash.values():
        if len(group) <= 1:
            continue
        # Count label lines per duplicate (0 when the label file is missing).
        counts = []
        for img in group:
            stem = os.path.splitext(img)[0]
            label_path = os.path.join(labels_folder, stem + '.txt')
            if os.path.exists(label_path):
                with open(label_path, 'r') as fh:
                    counts.append(len(fh.readlines()))
            else:
                counts.append(0)

        # Report the duplicate group and its per-image label counts.
        report = "Duplicate images and their label counts:\n"
        for img, cnt in zip(group, counts):
            report += f"{img}: {cnt} labels\n"
        print(report)
        log_file.write(report)

        # Keep the first image holding the highest label count (same tie-break
        # as list.index(max(...))); delete every other copy and its labels.
        keep = max(range(len(group)), key=counts.__getitem__)
        for idx, img in enumerate(group):
            if idx == keep:
                continue
            stem = os.path.splitext(img)[0]
            img_path = os.path.join(images_folder, img)
            label_path = os.path.join(labels_folder, stem + '.txt')
            if os.path.exists(img_path):
                os.remove(img_path)
            if os.path.exists(label_path):
                os.remove(label_path)
            removal_note = f"Removed {img} and {stem}.txt due to duplication."
            print(removal_note)
            log_file.write(removal_note + '\n')


if __name__ == "__main__":
    # Run both cleanup passes back to back, sharing a single log file.
    with open('log1.txt', 'w') as log:
        remove_empty_images_and_labels(log)
        remove_duplicate_images_and_labels(log)