import os
import cv2
import sys
import numpy as np
import shutil
from tqdm import tqdm
from PIL import Image
import imagehash

# Read the source directory from the command-line arguments.
if len(sys.argv) < 2:
    print("使用方法: python script.py <source_dir> [遮挡标志(1或0)] [阈值(默认20)]")
    sys.exit(1)

source_dir = sys.argv[1]  # input folder
overlay_flag = int(sys.argv[2]) if len(sys.argv) > 2 else 0  # mask flag; defaults to 0 (no masking)
face_threshold_percentage = float(sys.argv[3]) if len(sys.argv) > 3 else 0  # face-width threshold in percent; defaults to 0 (NOTE(review): usage text says default is 20 — confirm which is intended)

out_dir = os.path.join(source_dir, "__out__")  # root of the output tree
to_dir = os.path.join(out_dir, '已识别到的')  # images with a detected face (when no threshold is set)
threshold_dir = to_dir  # same folder unless a positive threshold is given below
if int(face_threshold_percentage) > 0:
    to_dir = os.path.join(out_dir, f'脸小于{int(face_threshold_percentage)}%的')
    threshold_dir = os.path.join(out_dir, f'脸大于{int(face_threshold_percentage)}%的')  # faces wider than the threshold
noto_dir = os.path.join(out_dir, '未能识别的')  # images where no face was detected
similar_dir = os.path.join(out_dir, '相似的')  # near-duplicate images

# Make sure all destination folders exist before processing starts.
for directory in [to_dir, noto_dir, threshold_dir, similar_dir]:
    os.makedirs(directory, exist_ok=True)

# Load OpenCV's DNN face detector (Caffe SSD, 300x300 input).
prototxt_path = "deploy.prototxt"
caffemodel_path = "res10_300x300_ssd_iter_140000.caffemodel"
net = cv2.dnn.readNetFromCaffe(prototxt_path, caffemodel_path)

# Read an image from a path that may contain non-ASCII (e.g. Chinese) characters.
def imread_with_chinese_path(path):
    """Decode the image at *path* through a raw byte buffer, sidestepping
    cv2.imread's trouble with non-ASCII paths on some platforms.

    Returns a BGR ndarray, or None when the bytes cannot be decoded.
    """
    raw_bytes = np.fromfile(path, dtype=np.uint8)
    return cv2.imdecode(raw_bytes, cv2.IMREAD_COLOR)

# Paste a (possibly transparent) overlay image over a detected face region.
def overlay_face(img, box, overlay):
    """Alpha-blend *overlay* onto *img* over the padded face bounding box.

    Args:
        img: BGR image array of shape (H, W, 3); modified in place.
        box: array-like of (x, y, x1, y1) face coordinates in pixels.
        overlay: BGRA (or BGR) overlay image, or None if loading failed.

    The box is padded by a fixed margin and clamped to the image bounds.
    Robustness fixes vs. the original: a missing overlay, an overlay with
    no alpha channel, and a degenerate (empty) box are handled instead of
    raising inside cv2.resize / channel indexing.
    """
    if overlay is None:  # mask PNG failed to load — nothing to draw
        return
    (x, y, x1, y1) = box.astype("int")
    margin = 70  # extra pixels around the face so the mask covers it fully
    x = max(0, x - margin)
    y = max(0, y - margin)
    x1 = min(img.shape[1], x1 + margin)
    y1 = min(img.shape[0], y1 + margin)
    face_w = x1 - x
    face_h = y1 - y
    if face_w <= 0 or face_h <= 0:  # box fell entirely outside the image
        return
    resized_overlay = cv2.resize(overlay, (face_w, face_h))
    overlay_rgb = resized_overlay[:, :, :3]
    if resized_overlay.ndim == 3 and resized_overlay.shape[2] == 4:
        overlay_alpha = resized_overlay[:, :, 3] / 255.0  # normalize alpha to [0, 1]
    else:
        # No alpha channel: treat the overlay as fully opaque.
        overlay_alpha = np.ones((face_h, face_w), dtype=np.float64)
    roi = img[y:y1, x:x1]
    for c in range(3):
        roi[:, :, c] = (overlay_alpha * overlay_rgb[:, :, c] + (1 - overlay_alpha) * roi[:, :, c])
    img[y:y1, x:x1] = roi

# Load the overlay PNG used for masking faces (expects a transparent background).
face_mask_pic = "face_mask.png"
overlay_image = cv2.imread(face_mask_pic, cv2.IMREAD_UNCHANGED)  # None when the file is missing — TODO confirm downstream handles that
# Compute the perceptual hash (pHash) of an image file.
def calculate_image_hash(image_path):
    """Return imagehash.phash for the image at *image_path*, or None when
    the file cannot be opened/decoded (the caller skips such images).

    Fix vs. the original: the PIL image is opened in a ``with`` block so
    the underlying file handle is closed promptly instead of leaking
    until garbage collection.
    """
    try:
        with Image.open(image_path) as image:
            return imagehash.phash(image)
    except OSError as e:  # PIL.UnidentifiedImageError is an OSError subclass
        print(f"无法处理图像文件 {image_path}: {e}")
        return None  # sentinel so callers can skip this image


 

# Similarity score between two perceptual hashes, in [0, 1] (1.0 = identical).
def calculate_similarity(hash1, hash2):
    """Return 1 minus the normalized Hamming distance between two hashes.

    ``hash1 - hash2`` yields the raw Hamming distance (ImageHash defines
    subtraction that way); dividing by the total bit count — the hash
    matrix side length squared — normalizes it into [0, 1].
    """
    total_bits = len(hash1.hash) ** 2
    hamming_distance = hash1 - hash2
    return 1 - hamming_distance / total_bits

# Dict mapping each kept image's perceptual hash -> its file path,
# used to detect near-duplicates among already-processed images.
image_hashes = {}

# Append one processed file's path to the progress log.
def log_processed_file(file_path, processed_files_path):
    """Record *file_path* as one UTF-8 line in the progress log file so a
    finished run can be audited (the log is later renamed success.txt)."""
    with open(processed_files_path, 'a', encoding='utf-8') as log:
        log.write(f"{file_path}\n")

# Append one similar-image pair (with its similarity score) to the list file.
def log_similar_image(image1, image2, similarity, similar_list_path):
    """Record that *image1* resembles *image2* as a single UTF-8 line,
    formatting the similarity with two decimal places."""
    entry = f"{image1} -> {image2}, 相似度: {similarity:.2f}\n"
    with open(similar_list_path, 'a', encoding='utf-8') as listing:
        listing.write(entry)

# Count every file under source_dir up front so tqdm can show a total
# (this also counts files inside __out__, which are later skipped).
total_files = sum([len(files) for r, d, files in os.walk(source_dir)])
processed_file_path = os.path.join(source_dir, 'processed_files.txt')
success_file_path = os.path.join(source_dir, 'success.txt')
similar_list_path = os.path.join(similar_dir, 'list.txt')  # list of similar-image pairs

# Statistics counters reported in success.txt at the end of the run.
total_images = 0
masked_images = 0
undetected_faces = 0
similar_images_count = 0

# Main pass: walk source_dir, detect faces in each image, optionally mask
# them, and route every image into the matching output folder.
with tqdm(total=total_files, desc="处理进度", unit="file", ncols=100) as pbar:
    for root, dirs, files in os.walk(source_dir):
        for filename in files:
            if filename.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp', '.gif')):
                img_path = os.path.join(root, filename)
                # Skip anything already inside the output tree so re-runs
                # don't re-process their own results.
                if img_path.startswith(out_dir):
                    pbar.update(1)
                    continue

                # Count every candidate image.
                total_images += 1

                # Load the image (path may contain non-ASCII characters).
                img = imread_with_chinese_path(img_path)
                if img is None:
                    print(f"无法加载图像: {img_path}")
                    pbar.update(1)
                    continue

                h, w = img.shape[:2]
                # SSD face detector: 300x300 input with mean subtraction.
                blob = cv2.dnn.blobFromImage(cv2.resize(img, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0))
                net.setInput(blob)
                detections = net.forward()

                face_detected = False
                max_face_width = 0  # widest detected face, in pixels
                for i in range(detections.shape[2]):
                    confidence = detections[0, 0, i, 2]
                    if confidence > 0.5:  # detection confidence threshold
                        face_detected = True
                        # Detector outputs normalized coords; scale to pixels.
                        box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                        (x, y, x1, y1) = box.astype("int")
                        face_width = x1 - x
                        max_face_width = max(max_face_width, face_width)
                        if overlay_flag == 1:
                            overlay_face(img, box, overlay_image)
                            # NOTE(review): increments once per detected face,
                            # so a multi-face image inflates the "masked
                            # images" total written to success.txt — confirm.
                            masked_images += 1

                if face_detected:
                    face_width_percentage = (max_face_width / w) * 100
                    tqdm.write(f"最大脸宽占图片宽度的 {face_width_percentage:.2f}%")

                # Mirror the source sub-folder structure under the output dir.
                relative_path = os.path.relpath(root, source_dir)

                if face_detected:
                    if face_width_percentage > face_threshold_percentage:
                        output_subdir = os.path.join(threshold_dir, relative_path)
                    else:
                        output_subdir = os.path.join(to_dir, relative_path)
                else:
                    output_subdir = os.path.join(noto_dir, relative_path)
                    undetected_faces += 1  # image with no detectable face

                os.makedirs(output_subdir, exist_ok=True)
                output_path = os.path.join(output_subdir, filename)

                # Near-duplicate check against all previously kept images.
                image_hash = calculate_image_hash(img_path)
                if image_hash is None:
                    pbar.update(1)
                    continue  # skip unreadable/corrupt files

                similar_image_found = False
                for saved_hash, saved_image_path in image_hashes.items():
                    similarity = calculate_similarity(image_hash, saved_hash)
                    if similarity > 0.85:  # similarity threshold (85%)
                        # NOTE(review): similar images are copied flat into
                        # similar_dir under their original name, so files
                        # with the same basename overwrite each other.
                        similar_output_path = os.path.join(similar_dir, filename)
                        shutil.copy(img_path, similar_output_path)
                        tqdm.write(f"图片相似: {img_path} 和 {saved_image_path} (相似度: {similarity:.2f})")
                        # Record the pair and its similarity in list.txt.
                        log_similar_image(img_path, saved_image_path, similarity, similar_list_path)
                        similar_images_count += 1
                        similar_image_found = True
                        break

                if not similar_image_found:
                    image_hashes[image_hash] = img_path
                    if face_detected:
                        # Save the (possibly masked) image; tofile() handles
                        # non-ASCII output paths. NOTE(review): the data is
                        # JPEG-encoded but keeps the original extension in
                        # the filename — confirm intent.
                        cv2.imencode('.jpg', img)[1].tofile(output_path)
                    else:
                        shutil.copy(img_path, output_path)

                log_processed_file(img_path, processed_file_path)
                pbar.update(1)

# Mark the run complete by renaming the progress log to success.txt.
# NOTE(review): on Windows os.rename raises if success.txt already exists
# from an earlier run — confirm that is acceptable.
if os.path.exists(processed_file_path):
    os.rename(processed_file_path, success_file_path)

# Append the final statistics to success.txt.
with open(success_file_path, 'a', encoding='utf-8') as f:
    f.write(f"\n处理统计:\n")
    f.write(f"总共处理了 {total_images} 张图片\n")
    f.write(f"遮挡了 {masked_images} 张图片\n")
    f.write(f"未能识别到人脸的图片: {undetected_faces} 张\n")
    f.write(f"相似的图片: {similar_images_count} 对\n")

print(f"处理完成！所有图像已保存到目标目录。\nsuccess.txt 文件已生成于: {success_file_path}")
