import os
import hashlib
from pathlib import Path
import requests

def calculate_md5(content):
    """Return the hexadecimal MD5 digest of *content* (a bytes-like object)."""
    digest = hashlib.md5(content)
    return digest.hexdigest()


def is_image_unique(save_dir, image_md5):
    """Return True if no file in *save_dir* has MD5 digest *image_md5*.

    Scans every regular file directly inside ``save_dir`` (non-recursive),
    hashing each one in chunks so large images are not loaded fully into
    memory. On the first match, logs the duplicate and returns False.

    :param save_dir: directory to scan (str or Path)
    :param image_md5: lowercase hex MD5 digest to compare against
    :return: True when the hash is not found (image is new), False otherwise
    """
    for existing_file in Path(save_dir).glob("*"):
        if not existing_file.is_file():
            continue
        digest = hashlib.md5()
        with open(existing_file, "rb") as f:
            # Hash in 64 KiB chunks instead of reading the whole file at once.
            while chunk := f.read(65536):
                digest.update(chunk)
        if digest.hexdigest() == image_md5:
            # BUGFIX: report the matching file, not the directory, in the log.
            print(f"重复图片已跳过：{existing_file}（MD5: {image_md5}）")
            return False
    return True

# Example usage: download one image and save it only if its content is new.
target_dir = "img"  # destination folder for downloaded images

res = requests.get("https://sns-webpic-qc.xhscdn.com/202505191826/a7f967697dcb773827d51f3fcb98557b/1040g2sg3148muf6g6i705n6e82tlr938nn164c0!nd_dft_wlteh_webp_3")
# Fail fast on HTTP errors; otherwise an error page would be hashed and saved as a .jpg.
res.raise_for_status()
md5 = calculate_md5(res.content)

# Make sure the destination exists before scanning or writing into it.
os.makedirs(target_dir, exist_ok=True)

# Use target_dir consistently (original mixed a hard-coded "img/" with target_dir).
flag = is_image_unique(target_dir, md5)
if flag:
    # Name the file after its MD5 so duplicates are also detectable by filename.
    with open(os.path.join(target_dir, md5 + ".jpg"), "wb") as f:
        f.write(res.content)
print(flag)

# if is_image_unique(target_dir, new_image_path):
#     # 保存新图片（可自定义文件名逻辑）
#     unique_filename = f"image_{calculate_md5(new_image_path)}.jpg"  # 用哈希命名
#     os.rename(new_image_path, os.path.join(target_dir, unique_filename))
#     print(f"新图片已保存：{unique_filename}")
# else:
#     os.remove(new_image_path)  # 直接删除重复文件