#!/usr/bin/env python
# A multi-process version of duplicate file remover with progress display
import hashlib
import os
import optparse
import sys
import time
import threading
from multiprocessing import Pool, cpu_count, Manager
from functools import partial


def md5(f):
    """Return the hex MD5 digest of the file at path *f*, or None on error.

    Reads the file in 4 KiB chunks so arbitrarily large files can be
    hashed without loading them fully into memory.  Any failure (missing
    file, permission error, read error) is reported on stderr and mapped
    to a None return so callers can skip the file.
    """
    try:
        md5_hash = hashlib.md5()
        # Use a distinct handle name: the original shadowed the path
        # parameter, so a read failure printed a file-object repr instead
        # of the offending path in the error message below.
        with open(f, "rb") as fh:
            for chunk in iter(lambda: fh.read(4096), b""):
                md5_hash.update(chunk)
        return md5_hash.hexdigest()
    except Exception as e:
        print(f"Error calculating MD5 for {f}: {e}", file=sys.stderr)
        return None


def process_file(file_path, counter):
    """Hash a single file and bump the shared progress counter.

    Returns a ``(md5_hex + size, file_path)`` tuple used as the duplicate
    key, or None when hashing fails or any other error occurs (the error
    is reported on stderr).
    """
    try:
        digest = md5(file_path)

        # Advance the shared progress counter.  A multiprocessing.Value
        # exposes get_lock(); a Manager-created Value proxy does not.
        # NOTE(review): += on a Manager proxy is not an atomic
        # read-modify-write, but the count only drives the progress
        # display, so occasional lost increments are harmless.
        if hasattr(counter, 'get_lock'):
            with counter.get_lock():
                counter.value += 1
        else:
            counter.value += 1

        if not digest:
            return None
        size = os.path.getsize(file_path)
        return (str(digest) + str(size), file_path)
    except Exception as e:
        print(f"Error processing {file_path}: {e}", file=sys.stderr)
        return None


def progress_printer(counter, total_files, stop_event):
    """Print hashing progress every 5 seconds until *stop_event* is set.

    Runs in a background thread.  Uses ``stop_event.wait(5)`` instead of
    ``time.sleep(5)`` so the thread wakes immediately when the event is
    set — with a plain sleep, the caller's ``join()`` could block for up
    to 5 extra seconds after all work had finished.  Emits one final
    "Done!" line on exit.
    """
    # Event.wait(timeout) returns True as soon as the event is set,
    # otherwise False after the timeout — exactly the loop condition.
    while not stop_event.wait(5):
        # Clamp to the total so a counter race never shows e.g. 101/100.
        current = min(counter.value, total_files)
        print(f"\rCalculated MD5 for {current}/{total_files} files...", end="", flush=True)
    # Final full-progress line once we have been told to stop.
    print(f"\rCalculated MD5 for {total_files}/{total_files} files... Done!", flush=True)


def rm_dup(path, exps):
    """Remove duplicate files using multi-processing for MD5 calculation.

    Walks *path* recursively, hashes every file in a process pool
    (skipping extensions listed in *exps*, a '-'-separated string such as
    ".py-.txt" or None), groups files whose md5+size key matches, and —
    after interactive confirmation — deletes every file in a duplicate
    group except the first.  Progress is reported by a background thread.
    """
    if not os.path.isdir(path):
        print("Specified directory does not exist!")
        return

    md5_dict = {}
    exp_list = exps.split("-") if exps else []
    
    print("Collecting files...")
    # Collect all candidate files up front (in sorted order per directory)
    # so the total count is known before hashing starts.
    all_files = []
    for root, _, files in os.walk(path):
        for f in sorted(files):
            file_path = os.path.join(root, f)
            if any(file_path.endswith(ext) for ext in exp_list):
                continue
            all_files.append(file_path)
    
    total_files = len(all_files)
    print(f"Found {total_files} files to process.")
    print(f"Using {cpu_count()} processes to calculate hashes...")
    
    # A Manager-backed Value is picklable, so worker processes in the
    # Pool can update the shared counter; the Event only coordinates the
    # local progress thread.
    manager = Manager()
    counter = manager.Value('i', 0)  # 'i' -> signed integer
    stop_event = threading.Event()
    
    # Background thread that prints progress every 5 seconds.
    progress_thread = threading.Thread(
        target=progress_printer,
        args=(counter, total_files, stop_event),
        daemon=True
    )
    progress_thread.start()
    
    # Hash all files in parallel across CPU cores.
    try:
        with Pool(processes=cpu_count()) as pool:
            # partial binds the shared counter so Pool.map can deliver a
            # single positional argument per file.
            process_func = partial(process_file, counter=counter)
            results = pool.map(process_func, all_files)
    finally:
        # Always tear down the progress thread and manager, even if the
        # pool raised.
        stop_event.set()
        progress_thread.join()
        manager.shutdown()
    
    # Group file paths by their combined md5+size key.
    for result in results:
        if result:
            file_comb, file_path = result
            if file_comb in md5_dict:
                md5_dict[file_comb].append(file_path)
            else:
                md5_dict[file_comb] = [file_path]
    
    # Keep only keys with more than one file (actual duplicates).
    md5_dict = {k: v for k, v in md5_dict.items() if len(v) > 1}
    
    if not md5_dict:
        print("No duplicate files found.")
        return
    
    print(f"\nDone! Following files will be deleted{len(md5_dict)}:\n")
    files_to_delete = []
    for key in md5_dict:
        print(f"{md5_dict[key]} is dump")
        # Keep the first file of each group; mark the rest for deletion.
        for item in md5_dict[key][1:]:
            files_to_delete.append(item)
    
    confirm = input("\nEnter (y)es to confirm operation or anything else to abort: ").lower()
    if confirm not in ("y", "yes"):
        print("Operation cancelled by user. Exiting...")
        return
    
    print("Deleting...")
    deleted_count = 0
    for file_path in files_to_delete:
        try:
            os.remove(file_path)
            deleted_count += 1
        except Exception as e:
            print(f"Error deleting {file_path}: {e}", file=sys.stderr)
    
    print(f"Done! Deleted {deleted_count} duplicate files...")


if __name__ == "__main__":
    # Startup banner.
    banner = (
        "    ##########Multi-process Duplicate File Remover#########",
        "    #                      Coded by monrocoury (modified)               #",
        "    #              Uses multiple processes for faster processing        #",
        "    #        Identifies duplicates by MD5 checksum and file size       #",
        "    ###################################################################",
    )
    for banner_line in banner:
        print(banner_line)

    # Command-line interface: target directory plus an optional
    # '-'-separated list of file extensions to skip.
    usage_text = (
        "usage: python multi_rm_dup.py -p <target path> -e <file extensions to ignore separated by ->\n"
        "example: python3 multi_rm_dup.py -p /Users/username/Downloads -e .py-.cpp-.txt"
    )
    cli = optparse.OptionParser(usage_text)
    cli.add_option(
        "-p", dest="target_path", type="string", help="provide target path"
    )
    cli.add_option(
        "-e",
        dest="ext2ignore",
        type="string",
        help="(optional) provide file extensions to ignore separated by - eg: -e .py-.doc",
    )

    opts, _unused_args = cli.parse_args()

    # The target path is mandatory; bail out early if it is missing.
    if not opts.target_path:
        print("Please provide a target path using -p option")
        sys.exit(1)

    rm_dup(opts.target_path, opts.ext2ignore)
