from pathlib import Path
import pandas as pd
from collections import defaultdict

def find_duplicate_files(directory, file_extensions):
    """Find same-named files with a matching extension under *directory*.

    Recursively walks *directory* and groups files by basename. Only files
    whose suffix appears in *file_extensions* are considered; the comparison
    is case-insensitive on both sides, so ``{'.MP3'}`` and ``{'.mp3'}``
    behave identically.

    Args:
        directory: Root directory to scan (str or ``Path``).
        file_extensions: Iterable of extensions including the leading dot,
            e.g. ``{'.mp3', '.mp4'}``.

    Returns:
        dict mapping filename -> list of full path strings, containing only
        names that occur more than once. NOTE: duplicates are detected by
        file NAME only, not by content.
    """
    # Normalize the caller's extensions once, outside the loop, so matching
    # is case-insensitive regardless of how they were supplied.
    normalized_extensions = {ext.lower() for ext in file_extensions}
    file_map = defaultdict(list)

    # Walk the directory tree; rglob('*') recurses into all subdirectories.
    path = Path(directory)
    for file in path.rglob('*'):
        if file.is_file() and file.suffix.lower() in normalized_extensions:
            file_map[file.name].append(str(file))

    # Keep only names that appeared more than once.
    duplicates = {filename: paths for filename, paths in file_map.items() if len(paths) > 1}

    return duplicates

def save_duplicates_to_csv(duplicates, csv_path):
    """Write the duplicate-file mapping to a CSV file.

    Args:
        duplicates: Mapping of filename -> list of full path strings.
        csv_path: Destination CSV path. Written with a UTF-8 BOM
            (``utf-8-sig``) so spreadsheet apps detect the encoding.
    """
    # One output row per (filename, path) pair.
    rows = [
        [filename, location]
        for filename, locations in duplicates.items()
        for location in locations
    ]

    frame = pd.DataFrame(rows, columns=['文件名', '文件路径'])
    frame.to_csv(csv_path, index=False, encoding='utf-8-sig')

def main(directory, csv_path):
    """Scan *directory* for duplicate media files and export them to *csv_path*."""
    # Media file types to look for.
    extensions = {'.mp4', '.mp3', '.aac'}

    save_duplicates_to_csv(find_duplicate_files(directory, extensions), csv_path)
    print(f"重复文件信息已保存到 {csv_path}")

if __name__ == "__main__":
    # Adjust these two paths before running: the directory to scan
    # and where to write the resulting CSV.
    main(
        r'D:\Documents\Music',
        r'D:\Documents\duplicates.csv',
    )
