import os
import sys
import hashlib
import shutil
from optparse import OptionParser
from collections import defaultdict
from datetime import datetime

def get_files_size(directory, min_size, exclude_patterns=None):
    """Recursively scan *directory* and group files by size.

    Args:
        directory: Root directory to walk.
        min_size: Smallest file size (bytes) to record.
        exclude_patterns: Optional list of substrings; any file or
            directory whose name contains one of them is skipped.

    Returns:
        A dict mapping file size (bytes) to a list of (path, mtime)
        tuples, or None when *directory* is not a valid directory.
    """
    if not os.path.isdir(directory):
        print(f"错误: 目录 '{directory}' 不存在或不是一个有效的目录")
        return None

    patterns = exclude_patterns or []
    size_map = defaultdict(list)

    def is_excluded(name):
        # Substring match against every exclusion pattern.
        return any(p in name for p in patterns)

    for root, subdirs, names in os.walk(directory):
        # Prune excluded directories in place so os.walk skips them.
        subdirs[:] = [d for d in subdirs if not is_excluded(d)]

        for name in names:
            if is_excluded(name):
                continue
            path = os.path.join(root, name)
            try:
                size = os.path.getsize(path)
                if size >= min_size:
                    # Record mtime alongside the path for later sorting.
                    size_map[size].append((path, os.path.getmtime(path)))
            except OSError as e:
                print(f"无法获取文件 '{path}' 的大小: {e}")

    return size_map

def get_filename(file_path):
    """Return the final path component of *file_path* (name without directory)."""
    _, name = os.path.split(file_path)
    return name

def get_md5_hash(file_path, block_size=4096, read_entire_file=False):
    """Compute the MD5 hex digest of a file (whole file or a prefix).

    Args:
        file_path: Path of the file to hash.
        block_size: Number of bytes to read when hashing only a prefix.
        read_entire_file: When True, hash the whole file in streamed
            chunks regardless of ``block_size``.

    Returns:
        The hex digest string, or None when the file cannot be read.
    """
    md5 = hashlib.md5()
    # Callers pass block_size=-1 together with read_entire_file=True
    # (read_bytes == -1 means "whole file"); f.read(-1) would slurp the
    # entire file into memory in one chunk. Use a bounded chunk size for
    # the streaming loop instead.
    chunk_size = block_size if block_size > 0 else 65536
    try:
        with open(file_path, 'rb') as f:
            if read_entire_file:
                # Stream the whole file in fixed-size chunks.
                while chunk := f.read(chunk_size):
                    md5.update(chunk)
            else:
                # Hash only the leading block_size bytes.
                data = f.read(block_size)
                if data:
                    md5.update(data)
        return md5.hexdigest()
    except OSError as e:
        print(f"无法读取文件 '{file_path}' 计算MD5: {e}")
        return None

def calculate_statistics(file_sizes):
    """Summarize a size-grouped file map.

    Args:
        file_sizes: Dict mapping file size to a list of file entries.

    Returns:
        A dict with the total file count, total byte size, and counts
        of size groups containing exactly one vs. multiple files.
    """
    total_files = 0
    total_size = 0
    single_groups = 0
    multi_groups = 0

    for size, entries in file_sizes.items():
        count = len(entries)
        total_files += count
        # Every file in this group has the same size.
        total_size += size * count
        if count == 1:
            single_groups += 1
        else:
            multi_groups += 1

    return {
        'total_files': total_files,
        'single_file_groups': single_groups,
        'multiple_file_groups': multi_groups,
        'total_size': total_size,
    }

def find_duplicate_groups(files, read_bytes, check_content_only=False):
    """Find duplicate groups among a set of same-size files.

    A group is either a set of files sharing the same base name (unless
    *check_content_only* is set) or a set sharing the same MD5 digest.

    Args:
        files: List of (path, mtime) tuples, all of the same size.
        read_bytes: Bytes to hash per file; -1 hashes the whole file.
        check_content_only: When True, ignore names and group by content.

    Returns:
        Dict mapping a group id (an MD5 hex digest, or "name:<basename>"
        when the first member of a same-name group is unreadable) to the
        list of member paths.
    """
    duplicate_groups = defaultdict(list)
    read_entire_file = (read_bytes == -1)
    md5_cache = {}

    def md5_of(path):
        # Lazily compute and cache digests. The original pre-hashed every
        # file, but only the first member of a same-name group has its
        # hash consulted — lazy evaluation skips that wasted I/O.
        if path not in md5_cache:
            md5_cache[path] = get_md5_hash(path, read_bytes, read_entire_file)
        return md5_cache[path]

    if not check_content_only:
        # First pass: group files that share a base name.
        name_groups = defaultdict(list)
        for file_path, _ in files:
            name_groups[get_filename(file_path)].append(file_path)

        processed_files = set()
        for name, paths in name_groups.items():
            if len(paths) > 1:
                # Group id: MD5 of the first member, falling back to a
                # name-based id when that file cannot be read.
                group_id = md5_of(paths[0]) or f"name:{name}"
                duplicate_groups[group_id].extend(paths)
                processed_files.update(paths)

        # Remaining (uniquely named) files are grouped by content.
        remaining_files = [f[0] for f in files if f[0] not in processed_files]
    else:
        remaining_files = [f[0] for f in files]

    md5_groups = defaultdict(list)
    for file_path in remaining_files:
        digest = md5_of(file_path)
        if digest:
            md5_groups[digest].append(file_path)

    for digest, paths in md5_groups.items():
        if len(paths) > 1:
            duplicate_groups[digest].extend(paths)

    return duplicate_groups

def collect_all_duplicate_groups(file_sizes, read_bytes, content_only):
    """Run duplicate detection across every size group up front.

    Args:
        file_sizes: Dict mapping size to a list of (path, mtime) tuples.
        read_bytes: Bytes to hash per file; -1 hashes whole files.
        content_only: When True, duplicates are judged by content only.

    Returns:
        A list of dicts, one per duplicate group, each carrying the
        group's size, id, human-readable type, member paths, and the
        paths paired with their modification times.
    """
    # Flatten a path -> mtime lookup table first.
    mtime_of = {
        path: mtime
        for entries in file_sizes.values()
        for path, mtime in entries
    }

    results = []
    # Walk the size groups from largest to smallest.
    for size in sorted(file_sizes, reverse=True):
        entries = file_sizes[size]
        if len(entries) <= 1:
            continue

        groups = find_duplicate_groups(entries, read_bytes, content_only)
        for group_id, paths in groups.items():
            # Derive the human-readable group type from the id's shape.
            if group_id.startswith("name:"):
                group_type = f"同名文件组（文件名: {group_id[5:]}）"
            else:
                group_type = f"内容相同文件组（MD5值: {group_id}）"

            results.append({
                "size": size,
                "group_id": group_id,
                "group_type": group_type,
                "paths": paths,
                "paths_with_mtime": [(p, mtime_of[p]) for p in paths],
            })

    return results

def get_user_selection(paths_with_mtime):
    """Interactively ask which file of a duplicate group to keep.

    Sorts the group newest-first (in place), then prompts until a valid
    answer arrives. Modification-time ordering means plain Enter keeps
    the most recently modified copy.

    Returns:
        A (keep, to_delete) tuple; keep is None when the group is
        skipped ('n', empty to_delete) or fully deleted ('a').
    """
    # Newest file first so the default choice keeps the latest copy.
    paths_with_mtime.sort(key=lambda x: x[1], reverse=True)
    paths = [p[0] for p in paths_with_mtime]

    print("    请选择要保留的文件:")
    print("    - 直接回车: 默认保留最新修改的文件")
    print("    - 输入序号: 保留对应文件")
    print("    - 输入n: 跳过此组")
    print("    - 输入a: 删除此组全部文件")

    while True:
        choice = input(f"    请输入选择 (1-{len(paths)}): ").strip().lower()

        if choice == '':
            # Bare Enter: keep the newest, delete all the rest.
            return paths[0], paths[1:]

        if choice == 'n':
            print("    已跳过此组")
            return None, []

        if choice == 'a':
            print("    删除此组全部文件")
            return None, paths

        try:
            index = int(choice) - 1
        except ValueError:
            print("    输入无效，请输入数字、n或直接回车")
            continue

        if 0 <= index < len(paths):
            # Keep the chosen file; everything else is slated for deletion.
            return paths[index], paths[:index] + paths[index + 1:]
        print(f"    输入无效，请输入1到{len(paths)}之间的数字")

def delete_files(files, trash_dir=None, dry_run=False):
    """Delete files, optionally moving them into a trash directory.

    Args:
        files: Iterable of paths to remove.
        trash_dir: When set and an existing directory, files are moved
            there (name clashes get a numeric suffix) instead of deleted.
        dry_run: When True, only print what would be deleted.

    Returns:
        The number of files handled (all of them in dry-run mode).
    """
    if dry_run:
        print("    模拟删除以下文件:")
        for path in files:
            print(f"    - {path}")
        return len(files)

    handled = 0
    for path in files:
        try:
            if trash_dir and os.path.isdir(trash_dir):
                base = os.path.basename(path)
                dest = os.path.join(trash_dir, base)
                stem, ext = os.path.splitext(base)
                # Never clobber an existing trash entry: append _1, _2, ...
                suffix = 1
                while os.path.exists(dest):
                    dest = os.path.join(trash_dir, f"{stem}_{suffix}{ext}")
                    suffix += 1
                shutil.move(path, dest)
                print(f"    已移至回收站: {path}")
            else:
                os.remove(path)
                print(f"    已删除: {path}")
            handled += 1
        except OSError as e:
            print(f"    无法删除 {path}: {e}")

    return handled

def process_all_duplicates(all_duplicates, auto_delete, trash_dir, dry_run):
    """Walk every precomputed duplicate group and resolve it.

    For each group the members are listed newest-first, then either the
    newest file is kept automatically (*auto_delete*) or the user is
    asked which file to keep; the rest are passed to delete_files.

    Returns:
        Total number of files deleted (or moved to the trash).
    """
    if not all_duplicates:
        return 0

    removed = 0
    total = len(all_duplicates)
    print(f"\n共发现 {total} 个重复文件组，开始处理：\n")

    for index, info in enumerate(all_duplicates, 1):
        size = info["size"]
        group_type = info["group_type"]
        paths = info["paths"]
        paths_with_mtime = info["paths_with_mtime"]

        print(f"  重复组 {index}/{total}: {group_type}（文件大小: {size} 字节，共 {len(paths)} 个文件）")
        newest_first = sorted(paths_with_mtime, key=lambda x: x[1], reverse=True)
        for position, (path, mtime) in enumerate(newest_first, 1):
            # Show each candidate with its last-modified timestamp.
            stamp = datetime.fromtimestamp(mtime).strftime('%Y-%m-%d %H:%M:%S')
            print(f"    {position}. {path}  (修改于: {stamp})")

        if auto_delete:
            # Non-interactive: keep the most recently modified file.
            paths_with_mtime.sort(key=lambda x: x[1], reverse=True)
            keep = paths_with_mtime[0][0]
            to_delete = [entry[0] for entry in paths_with_mtime[1:]]
        else:
            # Interactive: let the user pick what to keep.
            keep, to_delete = get_user_selection(paths_with_mtime)

        if to_delete:
            if keep:
                print(f"    保留: {keep}")
            removed += delete_files(to_delete, trash_dir, dry_run)
        print()  # blank line between groups

    return removed

def print_statistics(stats, duplicate_groups_count, deleted_files):
    """Print the final numbered summary of the scan and cleanup run."""
    def format_size(num_bytes):
        # Walk up the unit ladder until the value drops below 1024.
        value = num_bytes
        for unit in ('B', 'KB', 'MB', 'GB'):
            if value < 1024.0:
                return f"{value:.2f} {unit}"
            value /= 1024.0
        return f"{value:.2f} TB"

    summary = [
        "统计结果：",
        f"1. 总文件数量：{stats['total_files']}",
        f"2. 总文件大小：{format_size(stats['total_size'])}",
        f"3. 单文件大小组数量：{stats['single_file_groups']}",
        f"4. 多文件大小组数量：{stats['multiple_file_groups']}",
        f"5. 重复文件组总数：{duplicate_groups_count}",
        f"6. 已删除/移动的重复文件数量：{deleted_files}",
    ]
    for line in summary:
        print(line)

def main():
    """CLI entry point: parse options, scan a directory, and clean up duplicates."""
    # Build the command-line interface (optparse; -1 for --bytes means hash whole files)
    parser = OptionParser(usage="usage: %prog [options] directory")
    parser.add_option("-y", "--yes", "--auto-delete",
                      action="store_true", dest="auto_delete", default=False,
                      help="自动删除重复文件，无需交互确认 [默认: 关闭]")
    parser.add_option("-b", "--bytes", type="int", dest="read_bytes", 
                      default=4096, metavar="NUM",
                      help="计算MD5时读取的字节数，-1表示读取整个文件 [默认: 4096]")
    parser.add_option("-m", "--min-size", type="int", dest="min_size", 
                      default=0, metavar="NUM",
                      help="统计文件最小值（字节） [默认: 0]")
    parser.add_option("-t", "--trash", dest="trash_dir", 
                      help="指定回收站目录，将删除的文件移动到此目录而非直接删除")
    parser.add_option("-n", "--dry-run", action="store_true", dest="dry_run", default=False,
                      help="模拟删除操作，不实际删除文件 [默认: 关闭]")
    parser.add_option("-x", "--exclude", dest="exclude", 
                      help="指定要排除的文件/目录模式（用逗号分隔）")
    parser.add_option("-c", "--content-only", action="store_true", dest="content_only", default=False,
                      help="只根据文件内容判断重复，忽略文件名 [默认: 关闭]")
    
    (options, args) = parser.parse_args()
    
    # The scan directory is a required positional argument
    if not args:
        parser.error("请指定要扫描的目录")
    
    # --bytes must be -1 (whole file) or a non-negative count
    if options.read_bytes < -1:
        parser.error(f"无效的字节数: {options.read_bytes}，必须是-1或正整数")
    
    # Split the comma-separated exclusion patterns into a list
    exclude_patterns = []
    if options.exclude:
        exclude_patterns = [p.strip() for p in options.exclude.split(',')]
    
    # The trash directory must already exist when supplied
    if options.trash_dir and not os.path.isdir(options.trash_dir):
        parser.error(f"指定的回收站目录 '{options.trash_dir}' 不存在")
    
    # Announce the scan target and effective parameters
    target_directory = args[0]
    print(f"正在扫描目录 '{target_directory}'...")
    print(f"参数: 最小文件大小={options.min_size}字节, MD5读取字节={options.read_bytes}, "
          f"排除模式={exclude_patterns}, 仅内容比较={options.content_only}\n")
    
    # Group files by size; NOTE(review): a bad directory (None) and an empty
    # scan result (empty dict) are both falsy here, so neither prints statistics
    file_sizes = get_files_size(target_directory, options.min_size, exclude_patterns)
    if not file_sizes:
        return
    
    # Basic per-size-group counts before any hashing happens
    stats = calculate_statistics(file_sizes)
    
    # Collect every duplicate group up front (all MD5 hashing happens here)
    print(f"正在分析文件，寻找重复项...")
    all_duplicates = collect_all_duplicate_groups(file_sizes, options.read_bytes, options.content_only)
    duplicate_groups_count = len(all_duplicates)
    
    print(f"分析完成，共发现 {duplicate_groups_count} 个重复文件组\n")
    
    # Resolve each duplicate group (interactive unless --yes was given)
    deleted_files = 0
    if duplicate_groups_count > 0:
        deleted_files = process_all_duplicates(
            all_duplicates, options.auto_delete, options.trash_dir, options.dry_run
        )
    else:
        print("未发现重复文件组，不需要进行删除操作\n")
    
    # Final numbered summary
    print_statistics(stats, duplicate_groups_count, deleted_files)

# Run the CLI entry point only when executed as a script, not on import.
if __name__ == "__main__":
    main()
