import os
import hashlib
from datetime import datetime
import openpyxl
from openpyxl.styles import Font, Alignment
import logging
import cv2
from skimage.metrics import structural_similarity as ssim
from skimage.transform import resize
import hashlib
import numpy as np
import os
import logging
from pathlib import Path
from collections import defaultdict
import os
from collections import defaultdict
from openpyxl import Workbook
from openpyxl.styles import Font, Alignment
from concurrent.futures import ThreadPoolExecutor, as_completed
import threading

# Name prefixes (directories and files) skipped everywhere below.
# NOTE(review): the raw-string prefix on r'忽略' is a no-op here, and this
# constant is re-defined later in this module with the same values.
IGNORED_PREFIXES = ('_ignore_', '.', '__',r'忽略')  # extensible tuple of ignored prefixes

# Configure root logging for the whole script
def setup_logger(log_file='app.log'):
    """Route log records to *log_file* (UTF-8) and to the console.

    Relies on logging.basicConfig, so it only takes effect the first
    time the root logger is configured in this process.
    """
    to_file = logging.FileHandler(log_file, encoding='utf-8')
    to_console = logging.StreamHandler()
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s [%(levelname)s] %(message)s',
        handlers=[to_file, to_console],
    )

# Compute a file's MD5 digest
def get_md5(file_path):
    """Return the hexadecimal MD5 digest of the file at *file_path*.

    The file is read in 4 KiB chunks so arbitrarily large files never
    have to fit in memory. On any failure the error is logged and an
    empty string is returned.
    """
    digest = hashlib.md5()
    try:
        with open(file_path, "rb") as fh:
            while True:
                chunk = fh.read(4096)
                if not chunk:
                    break
                digest.update(chunk)
        logging.info(f"成功计算 MD5: {file_path}")
        return digest.hexdigest()
    except Exception as e:
        logging.error(f"计算 MD5 失败: {file_path}, 错误: {e}")
        return ""
# Compare the MD5 digests of the source and target files
def md5_similarity(base_file, target_file):
    """Return True when the two files have identical MD5 digests.

    NOTE(review): get_md5 returns "" on read failure, so two unreadable
    files also compare equal here — confirm that is acceptable.
    """
    base_digest = get_md5(base_file)
    target_digest = get_md5(target_file)
    return base_digest == target_digest

# Fetch a file's size and creation timestamp
def get_file_info(file_path):
    """Return (size_in_bytes, creation_time) for *file_path*.

    The timestamp is formatted as 'YYYY-mm-dd HH:MM:SS'. On failure the
    error is logged and (0, "") is returned.
    """
    try:
        info = os.stat(file_path)
    except Exception as e:
        logging.error(f"获取文件信息失败: {file_path}, 错误: {e}")
        return 0, ""
    created = datetime.fromtimestamp(info.st_ctime).strftime('%Y-%m-%d %H:%M:%S')
    logging.info(f"获取文件信息成功: {file_path}")
    return info.st_size, created

# Create the Excel header row (file-info report variant).
# NOTE(review): this definition is shadowed by a later redefinition of
# create_excel_header in this module, so this version never runs. Its
# 8-column header also does not match write_file_info_to_excel, which
# appends only 4 values per row.
def create_excel_header(worksheet):
    # Column titles: file name, size, creation time, MD5, then metrics
    headers = [
        '文件名', '大小 (bytes)', '创建时间', 'MD5',
        '均值哈希相似度', '差值哈希相似度', '像素匹配率', 'SSIM'
    ]
    worksheet.append(headers)
    # Bold + centered header styling
    for cell in worksheet[1]:
        cell.font = Font(bold=True)
        cell.alignment = Alignment(horizontal='center')
    logging.info("Excel 表头已创建")

# Append one file's basic info as a worksheet row
def write_file_info_to_excel(worksheet, file_path):
    """Append a row of [name, size, creation time, MD5] for *file_path*.

    Any failure is logged and swallowed so a single bad file does not
    abort the surrounding run.
    """
    try:
        name = os.path.basename(file_path)
        file_size, created_at = get_file_info(file_path)
        digest = get_md5(file_path)
        worksheet.append([name, file_size, created_at, digest])
        logging.info(f"成功写入文件信息: {file_path}")
    except Exception as e:
        logging.error(f"写入文件信息失败: {file_path}, 错误: {e}")

def create_excel_header(worksheet):
    """Write the pairwise-comparison header row, styled bold and centered.

    NOTE(review): this redefines an earlier create_excel_header in the
    same module; this later definition is the one in effect at runtime.
    """
    worksheet.append([
        '主路径',
        '主文件名',
        '比较文件路径',
        '比较文件名',
        'MD5',
        '均值哈希相似度',
        '差值哈希相似度',
        '像素匹配率',
        'SSIM',
    ])
    bold = Font(bold=True)
    centered = Alignment(horizontal='center')
    for header_cell in worksheet[1]:
        header_cell.font = bold
        header_cell.alignment = centered

def append_comparison_result(worksheet, base_file, target_file,
                             md5_value=None, ahash=None, dhash=None, ratio=None, ssim=None):
    """Append one comparison-result row to *worksheet*.

    Metrics left as None are written as empty cells. Note the *ssim*
    parameter shadows the module-level ssim function inside this body;
    harmless here because the function is never called from this scope.
    """
    def cell(value):
        # Empty string stands in for a metric that was not computed
        return '' if value is None else value

    row = [
        os.path.dirname(base_file),
        os.path.basename(base_file),
        os.path.dirname(target_file),
        os.path.basename(target_file),
    ]
    row.extend(cell(v) for v in (md5_value, ahash, dhash, ratio, ssim))
    worksheet.append(row)

# Main driver (legacy version): walk the directory, compare every image
# pair, and write the results to Excel.
# NOTE(review): shadowed by a later redefinition of
# process_images_in_directory in this module, so this version is dead code.
def process_images_in_directory(directory, output_excel):
    imgMatcher = ImageMatcher()
    image_files = []
    
    for root_dir, dirs, files in os.walk(directory):
        # Log, then prune, ignored directories so os.walk never descends into them
        ignored_dirs = [d for d in dirs if d.startswith(IGNORED_PREFIXES)]
        for dir_name in ignored_dirs:
            logging.info(f"忽略目录: {os.path.join(root_dir, dir_name)}")
        dirs[:] = [d for d in dirs if not d.startswith(IGNORED_PREFIXES)]

        # Collect .jpg/.png files from the current directory
        for file in files:
            if file.startswith(IGNORED_PREFIXES):
                logging.info(f"忽略文件: {os.path.join(root_dir, file)}")
                continue

            if file.lower().endswith(('.jpg', '.png')):
                full_path = os.path.join(root_dir, file)
                image_files.append(full_path)

    if not image_files:
        logging.warning("未找到任何 JPG 或 PNG 文件。")
        print("未找到任何 JPG 或 PNG 文件。")
        return

    # Create the Excel workbook and worksheet
    wb = openpyxl.Workbook()
    ws = wb.active
    ws.title = "图像信息与相似度对比"
    create_excel_header(ws)

    comparison_count = 0  # number of comparison rows written so far
    save_interval = 100  # flush the workbook to disk every 100 rows

    # Compare every unordered pair exactly once
    for i, base_file in enumerate(image_files):
        try:
            # np.fromfile + imdecode so paths with non-ASCII characters decode
            img1 = cv2.imdecode(np.fromfile(base_file, dtype=np.uint8), -1)
            img1 = cv2.cvtColor(img1, cv2.COLOR_RGB2BGR)
            # Average hash
            hash1 = imgMatcher.aHash(img1)
            # Difference hash
            # BUG(review): this overwrites the aHash computed just above, so
            # the 'ahash' metric below actually compares img1's dHash against
            # img2's aHash. The later redefinition of this function keeps the
            # two hashes in separate variables.
            hash1 = imgMatcher.dHash(img1)
        except Exception as e:
            logging.error(f"读取基准图像失败: {base_file}, 错误: {e}")
            continue
    
        for target_file in image_files[i + 1:]:  # only later files, so each pair is visited once
            logging.info(f"正在比较: {base_file} vs {target_file}")
            print(f"正在比较: {base_file} vs {target_file}")

            try:
                # Cheap MD5 identity check first
                md5_value = md5_similarity(base_file, target_file)

                ahash, dhash, ratio, ssim_score = None, None, None, None
                if not md5_value:  # only run image comparisons when the bytes differ
                    
                    img2 = cv2.imdecode(np.fromfile(target_file, dtype=np.uint8), -1)
                    img2 = cv2.cvtColor(img2, cv2.COLOR_RGB2BGR)
                    
                    hash2 = imgMatcher.aHash(img2)
                    ahash = imgMatcher.cmpHash(hash1, hash2)/100
                    
                    hash2 = imgMatcher.dHash(img2)
                    dhash = imgMatcher.cmpHash(hash1, hash2)/100

                    # SSIM
                    (score, diff) = imgMatcher.calculate_ssim(img1, img2, channel_axis=2)
                    ssim_score = score
                    ratio = 0  # placeholder value; a real pixel-match rate was never implemented

                # Write this pair's results to the worksheet
                append_comparison_result(
                    ws,
                    base_file,
                    target_file,
                    md5_value=md5_value,
                    ahash=ahash,
                    dhash=dhash,
                    ratio=ratio,
                    ssim=ssim_score
                )
                comparison_count += 1

                # Periodic save so progress survives a crash
                if comparison_count % save_interval == 0:
                    wb.save(output_excel)
                    logging.info(f"已处理 {comparison_count} 组数据，Excel 已保存至 {output_excel}")

            except Exception as e:
                logging.error(f"比较失败: {base_file} vs {target_file}, 错误: {e}")

    # Final save
    wb.save(output_excel)
    logging.info(f"处理完成，最终结果已保存至 {output_excel}")
    print(f"处理完成，结果已保存至 {output_excel}")




# Shared result buffer and lock for the threaded comparison path.
# NOTE(review): compare_pair below returns its result instead of appending
# here under the lock, so these globals appear unused in the visible code.
comparison_results = []
results_lock = threading.Lock()

def compare_pair(args):
    """Compare one image pair; worker function for a thread pool.

    *args* is a (base_file, target_file, imgMatcher) tuple (single-argument
    form so it can be mapped over by an executor). Returns a result dict
    with keys base_file/target_file/md5/ahash/dhash/ssim, or None when
    anything fails (the error is logged).
    """
    base_file, target_file, imgMatcher = args
    try:
        # Cheap MD5 identity check FIRST: byte-identical files need no
        # decoding at all. (The original decoded both images up front,
        # which wasted work and failed identical-but-undecodable pairs.)
        md5_value = get_md5(base_file) == get_md5(target_file)

        ahash, dhash, ssim_score = None, None, None
        if not md5_value:
            # np.fromfile + imdecode so non-ASCII paths decode correctly
            img1 = cv2.imdecode(np.fromfile(base_file, dtype=np.uint8), -1)
            img1 = cv2.cvtColor(img1, cv2.COLOR_RGB2BGR)
            img2 = cv2.imdecode(np.fromfile(target_file, dtype=np.uint8), -1)
            img2 = cv2.cvtColor(img2, cv2.COLOR_RGB2BGR)

            # Perceptual-hash Hamming distances, scaled by /100
            ahash = imgMatcher.cmpHash(imgMatcher.aHash(img1), imgMatcher.aHash(img2)) / 100
            dhash = imgMatcher.cmpHash(imgMatcher.dHash(img1), imgMatcher.dHash(img2)) / 100

            score, _diff = imgMatcher.calculate_ssim(img1, img2, channel_axis=2)
            ssim_score = score

        return {
            'base_file': base_file,
            'target_file': target_file,
            'md5': md5_value,
            'ahash': ahash,
            'dhash': dhash,
            'ssim': ssim_score
        }
    except Exception as e:
        logging.error(f"线程比较失败: {base_file} vs {target_file}, 错误: {e}")
        return None
import json
import os

# Path of the JSON file that persists comparison progress between runs.
# NOTE(review): these mid-file imports duplicate top-of-file ones (os) and
# add json; unconventional placement but harmless.
CACHE_FILE = 'image_comparison_cache.json'

def load_cache():
    """Load the persisted comparison cache, or a fresh empty structure.

    A missing, unreadable or corrupted cache file falls back to the empty
    structure (with a logged error), so a damaged cache file can never
    permanently block a new run.
    """
    if os.path.exists(CACHE_FILE):
        try:
            with open(CACHE_FILE, 'r', encoding='utf-8') as f:
                return json.load(f)
        except (OSError, json.JSONDecodeError) as e:
            logging.error(f"缓存文件读取失败，使用空缓存: {CACHE_FILE}, 错误: {e}")
    return {
        "md5_map": {},           # MD5 -> file_path
        "compared_pairs": [],    # [[md5_a, md5_b], ...]
        "processed_files": []    # paths already processed
    }

def save_cache(cache):
    """Persist *cache* to CACHE_FILE as pretty-printed UTF-8 JSON."""
    with open(CACHE_FILE, 'w', encoding='utf-8') as f:
        json.dump(cache, f, ensure_ascii=False, indent=2)
def process_images_in_directory(directory, output_excel):
    """Compare every new image pair under *directory*; append results to Excel.

    Progress (known MD5s, compared pairs, processed files) is persisted to
    a JSON cache so interrupted runs resume where they left off.
    NOTE(review): this redefines the earlier process_images_in_directory in
    this module; this later version is the one in effect at runtime.
    """
    imgMatcher = ImageMatcher()
    image_files = []

    # Step 1: load the resume cache
    cache = load_cache()
    processed_files = set(cache['processed_files'])
    compared_pairs = set(tuple(sorted(pair)) for pair in cache['compared_pairs'])
    md5_map = dict(cache['md5_map'])  # MD5 -> first image path seen with that digest

    # Step 2: collect all image files not yet processed
    for root_dir, dirs, files in os.walk(directory):
        dirs[:] = [d for d in dirs if not d.startswith(IGNORED_PREFIXES)]
        for file in files:
            if file.startswith(IGNORED_PREFIXES):
                continue
            if file.lower().endswith(('.jpg', '.png')):
                full_path = os.path.join(root_dir, file)
                if full_path in processed_files:
                    logging.info(f"已处理过: {full_path}，跳过")
                    continue
                image_files.append(full_path)

    if not image_files:
        logging.warning("未找到需要处理的新图像文件。")
        print("未找到需要处理的新图像文件。")
        return

    # Step 3: create the workbook, or reopen an existing one
    # NOTE(review): create_excel_header is called unconditionally, so each
    # resumed run appends another header row to an existing workbook.
    wb = openpyxl.Workbook() if not os.path.exists(output_excel) else openpyxl.load_workbook(output_excel)
    ws = wb.active
    create_excel_header(ws)

    comparison_count = 0
    save_interval = 100

    # Step 4: main loop over base images
    for i, base_file in enumerate(image_files):
        try:
            base_md5 = get_md5(base_file)

            # Skip if already processed, or if its MD5 is already known
            # (meaning it duplicates an earlier image).
            if base_file in processed_files or base_md5 in md5_map:
                continue

            # Decode the base image (np.fromfile + imdecode handles non-ASCII paths)
            img1 = cv2.imdecode(np.fromfile(base_file, dtype=np.uint8), -1)
            img1 = cv2.cvtColor(img1, cv2.COLOR_RGB2BGR)
            hash1_a = imgMatcher.aHash(img1)
            hash1_d = imgMatcher.dHash(img1)

            # Register this image as seen/processed
            md5_map[base_md5] = base_file
            processed_files.add(base_file)

        except Exception as e:
            logging.error(f"读取基准图像失败: {base_file}, 错误: {e}")
            continue

        # Step 5: compare against every later image
        for target_file in image_files[i + 1:]:
            try:
                target_md5 = get_md5(target_file)

                # NOTE(review): a target whose MD5 is already in md5_map is an
                # exact duplicate of an earlier image, but it is skipped
                # silently here and never reported in the output.
                if target_file in processed_files or target_md5 in md5_map:
                    continue

                # Skip pairs compared in a previous run.
                # NOTE(review): the pair is marked compared BEFORE the
                # comparison succeeds, so a pair that raises below is never
                # retried on a later run.
                pair_key = tuple(sorted((base_md5, target_md5)))
                if pair_key in compared_pairs:
                    continue
                compared_pairs.add(pair_key)

                # Decode the target image
                img2 = cv2.imdecode(np.fromfile(target_file, dtype=np.uint8), -1)
                img2 = cv2.cvtColor(img2, cv2.COLOR_RGB2BGR)

                # Perceptual-hash Hamming distances, scaled by /100
                hash2_a = imgMatcher.aHash(img2)
                ahash = imgMatcher.cmpHash(hash1_a, hash2_a) / 100

                hash2_d = imgMatcher.dHash(img2)
                dhash = imgMatcher.cmpHash(hash1_d, hash2_d) / 100

                # SSIM comparison
                (score, diff) = imgMatcher.calculate_ssim(img1, img2, channel_axis=2)
                ssim_score = score

                # Write the row. md5_value is hard-coded False because
                # MD5-equal pairs never reach this point; ratio=0 is a
                # placeholder for an unimplemented pixel-match rate.
                append_comparison_result(
                    ws,
                    base_file,
                    target_file,
                    md5_value=False,
                    ahash=ahash,
                    dhash=dhash,
                    ratio=0,
                    ssim=ssim_score
                )
                comparison_count += 1

                # Periodic save of workbook and cache
                if comparison_count % save_interval == 0:
                    wb.save(output_excel)
                    logging.info(f"已处理 {comparison_count} 组数据，Excel 已保存至 {output_excel}")
                    save_cache({
                        "md5_map": md5_map,
                        "compared_pairs": list(compared_pairs),
                        "processed_files": list(processed_files)
                    })

            except Exception as e:
                logging.error(f"比较失败: {base_file} vs {target_file}, 错误: {e}")

        # Periodic cache save after each base image
        if comparison_count % save_interval == 0:
            save_cache({
                "md5_map": md5_map,
                "compared_pairs": list(compared_pairs),
                "processed_files": list(processed_files)
            })

    # Final save of workbook and cache
    wb.save(output_excel)
    save_cache({
        "md5_map": md5_map,
        "compared_pairs": list(compared_pairs),
        "processed_files": list(processed_files)
    })

    logging.info(f"处理完成，结果已保存至 {output_excel}")
    print(f"处理完成，结果已保存至 {output_excel}")




# Ignored directory/file prefixes (same rule as earlier in the file).
# NOTE(review): duplicate re-definition of the module-level IGNORED_PREFIXES;
# this later assignment is the one in effect for the functions below.
IGNORED_PREFIXES = ('_ignore_', '.', '__', '忽略')  
# Check that all images live only in leaf (bottom-level) directories.
# A directory holding both image files and subdirectories is an error.
def check_all_images_in_leaf_directories(root_dir):
    """Verify that image files appear only in leaf directories.

    A directory that holds both .jpg/.png files and (non-ignored)
    subdirectories violates the layout rule; each violation is logged.

    :param root_dir: root directory to scan
    :return: True when the layout is valid, False otherwise
    """
    structure_ok = True

    for current_dir, subdirs, files in os.walk(root_dir):
        # Prune ignored subdirectories in place so os.walk skips them
        subdirs[:] = [d for d in subdirs if not d.startswith(IGNORED_PREFIXES)]

        # Non-ignored image files present in this directory
        images_here = [
            name for name in files
            if name.lower().endswith(('.jpg', '.png'))
            and not name.startswith(IGNORED_PREFIXES)
        ]

        # Violation: images coexist with (non-ignored) subdirectories
        if not (images_here and subdirs):
            continue
        error_msg = f"目录 '{current_dir}' 包含图像文件但不是最后一级目录，违反结构规范。"
        logging.error(error_msg)
        print(f"[ERROR] {error_msg}")
        structure_ok = False

    if not structure_ok:
        error_summary = "检测到部分目录中图像文件与子目录共存，违反仅允许最后一级目录存放图片的规定。"
        logging.critical(error_summary)
        print(f"[CRITICAL] {error_summary}")
        return False

    logging.info("所有图像均位于最后一级目录中，结构符合规范。")
    return True

# Count images per leaf directory and write the census to an Excel file
def count_images_in_leaf_directories(directory, output_excel):
    """Count .jpg/.png files in every leaf directory under *directory*.

    Writes one Excel row per leaf directory: full path, leaf name, how
    many leaf directories of that name have been seen so far, and the
    image count. Returns the per-name leaf-directory counter.
    """
    dir_counter = defaultdict(int)
    dir_image_counter = defaultdict(int)

    wb = Workbook()
    ws = wb.active
    ws.title = "目录统计"

    # Four headers to match the four cells appended per data row.
    # (The original header row had only three titles for four-value rows.)
    headers = ['全部目录名称', '最后一级目录名称', '同名目录序号', '图片数量']
    ws.append(headers)

    # Bold + centered header styling
    for cell in ws[1]:
        cell.font = Font(bold=True)
        cell.alignment = Alignment(horizontal='center')

    for dirpath, dirnames, filenames in os.walk(directory):
        # Prune ignored directories in place so os.walk skips them
        dirnames[:] = [d for d in dirnames if not d.startswith(IGNORED_PREFIXES)]
        if dirnames:
            continue  # not a leaf directory

        leaf_dir_name = os.path.basename(dirpath)
        # Use the shared IGNORED_PREFIXES here too (the original used a
        # hard-coded tuple with 'ignore_' instead of '_ignore_').
        image_files = [
            f for f in filenames
            if f.lower().endswith(('.jpg', '.png'))
            and not f.startswith(IGNORED_PREFIXES)
        ]
        image_count = len(image_files)
        dir_counter[leaf_dir_name] += 1
        dir_image_counter[leaf_dir_name] += image_count

        # One row per leaf directory
        ws.append([dirpath, leaf_dir_name, dir_counter[leaf_dir_name], image_count])

    wb.save(output_excel)
    logging.info(f"目录统计结果已保存至 {output_excel}")
    print(f"[INFO] 目录统计结果已保存至 {output_excel}")

    return dir_counter
class ImageMatcher:
    """Image similarity helpers: SSIM plus average/difference perceptual hashes."""

    def calculate_ssim(self, imageA, imageB, channel_axis=None, target_size=(256, 256)):
        """Return (ssim_score, diff_image) for two images.

        Both inputs are converted to grayscale and resized to *target_size*
        before comparison, so differently-sized inputs are accepted.
        *channel_axis* is kept for caller compatibility but unused: the
        comparison is always done on grayscale.

        :raises ValueError: if the images are too small for an SSIM window.
        """
        # Grayscale conversion keeps the comparison cheap and single-channel
        grayA = cv2.cvtColor(imageA, cv2.COLOR_BGR2GRAY) if len(imageA.shape) > 2 else imageA.copy()
        grayB = cv2.cvtColor(imageB, cv2.COLOR_BGR2GRAY) if len(imageB.shape) > 2 else imageB.copy()

        # Normalize both to the same size
        grayA = cv2.resize(grayA, target_size, interpolation=cv2.INTER_AREA)
        grayB = cv2.resize(grayB, target_size, interpolation=cv2.INTER_AREA)
        if grayA.shape != grayB.shape:
            grayB = resize(grayB, grayA.shape[:2], preserve_range=True).astype(grayA.dtype)

        # Pick an odd SSIM window no larger than 7 that fits the image
        min_side = min(grayA.shape[:2])
        win_size = min(7, min_side)
        if win_size < 3:
            raise ValueError("图像尺寸过小，无法计算 SSIM")
        if win_size % 2 == 0:
            win_size -= 1

        # The deprecated multichannel=False kwarg was dropped: inputs are
        # 2-D grayscale (single channel is implied) and the keyword has
        # been removed from modern scikit-image.
        return ssim(grayA, grayB, full=True, win_size=win_size)

    def dHash(self, img):
        """Difference hash: 64-char '0'/'1' string from a 9x8 grayscale.

        The extra column gives each of the 8x8 output bits a right-hand
        neighbour to compare against.
        """
        img = cv2.resize(img, (9, 8), interpolation=cv2.INTER_CUBIC)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # '1' where a pixel is brighter than its right-hand neighbour
        return ''.join(
            '1' if gray[i, j] > gray[i, j + 1] else '0'
            for i in range(8) for j in range(8)
        )

    def aHash(self, img):
        """Average hash: 64-char '0'/'1' string from an 8x8 grayscale."""
        img = cv2.resize(img, (8, 8), interpolation=cv2.INTER_CUBIC)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Mean gray level (identical to summing all 64 pixels and /64)
        avg = gray.mean()
        # '1' where a pixel is brighter than the mean, in row-major order
        return ''.join('1' if px > avg else '0' for px in gray.flatten())

    def cmpHash(self, hash1, hash2):
        """Return the Hamming distance between two hash strings.

        0 means identical; larger means more different. Returns -1 when the
        lengths differ (invalid input). Note: despite callers sometimes
        labelling this 'similarity', it is a difference count.
        """
        if len(hash1) != len(hash2):
            return -1
        return sum(a != b for a, b in zip(hash1, hash2))


# Script entry point
if __name__ == '__main__':
    # Root directory to scan (hard-coded deployment path)
    directory_path = r"D:\民政局-适老化改造-验收资料交给审计"
    output_excel_path = directory_path + r"\image_comparison.xlsx"
    output_dir_excel_path = directory_path + r"\dir_count.xlsx"
    log_file = directory_path + r"\app.log"

    # Remove stale outputs from a previous run BEFORE configuring logging.
    # (The original called logging.info here before setup_logger had run,
    # so those messages were silently dropped.)
    for stale_path in (log_file, output_excel_path, output_dir_excel_path):
        if os.path.exists(stale_path):
            os.remove(stale_path)

    # Configure file + console logging
    setup_logger(log_file)

    # Validate the directory layout before doing any heavy work
    if not check_all_images_in_leaf_directories(directory_path):
        print("检测到非法目录结构，请检查目录结构是否符合要求。")
        exit()

    # Per-leaf-directory image census
    count_images_in_leaf_directories(directory_path, output_dir_excel_path)

    # NOTE(review): the original called exit() unconditionally after the
    # census, which made the pairwise comparison below unreachable dead
    # code; it now runs as the comments and log messages intend.
    logging.info("开始处理图像文件...")
    process_images_in_directory(directory_path, output_excel_path)
    logging.info("图像处理及日志记录已完成。")