# -*- coding: utf-8 -*-

import os
import pandas as pd
import argparse
import hashlib
from concurrent.futures import ThreadPoolExecutor, as_completed

def parse_arguments(argv=None):
    """Parse command-line options for the restore script.

    Args:
        argv: Optional list of argument strings. Defaults to None, in which
            case argparse falls back to sys.argv[1:] — so existing callers
            are unaffected; passing an explicit list enables testing.

    Returns:
        argparse.Namespace with csv_file, base_directory and output_csv.
    """
    parser = argparse.ArgumentParser(description='Restore files based on CSV records')
    parser.add_argument('--csv_file', required=True, help='Input CSV file path with records')
    parser.add_argument('--base_directory', required=True, help='Base directory for file paths')
    parser.add_argument('--output_csv', required=True, help='Output CSV file path for missing files')
    return parser.parse_args(argv)

def read_csv_records(csv_file):
    """Load the record CSV into a pandas DataFrame and return it."""
    return pd.read_csv(csv_file)

def calculate_md5(file_path):
    """Return the hexadecimal MD5 digest of the file at file_path.

    Reads the file in 4 KiB chunks so arbitrarily large files can be
    hashed without loading them fully into memory.
    """
    digest = hashlib.md5()
    with open(file_path, "rb") as fh:
        while True:
            chunk = fh.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()

def find_files_by_type_and_size(base_directory, file_type, file_size):
    """Recursively search base_directory for candidate files.

    A file matches when its name ends with file_type (string suffix match)
    and its on-disk size equals file_size exactly.

    Returns:
        List of absolute/relative paths (joined from the walk root) of all
        matching files; empty list when nothing matches.
    """
    hits = []
    for dirpath, _dirnames, filenames in os.walk(base_directory):
        for name in filenames:
            if not name.endswith(file_type):
                continue
            candidate = os.path.join(dirpath, name)
            if os.path.getsize(candidate) == file_size:
                hits.append(candidate)
    return hits

def process_file_entry(row, base_directory):
    """Build the output records for a single CSV row.

    Searches base_directory for files matching the row's FILETYPE and
    FILESIZE. For each match, one record with its path and MD5 is produced;
    when nothing matches, a single placeholder record ('file not found',
    MD5 '0') is produced so the row still appears in the output.

    Args:
        row: A mapping (e.g. pandas Series) with keys id, OPERATIONCODE,
            OPERATIONID, PHYFILENAME, FILETYPE and FILESIZE.
        base_directory: Root directory to search recursively.

    Returns:
        List of dicts with keys 操作代码, 业务ID, 文件ID, 文件名称,
        文件路径, 文件MD5 (at least one element).
    """
    # Fields shared by every record for this row; the duplicated literals
    # in the original (and the unused ABSOLUTEPATH/SYSFILENAME reads) are
    # consolidated here.
    base_record = {
        '操作代码': row['OPERATIONCODE'],
        '业务ID': row['OPERATIONID'],
        '文件ID': row['id'],
        '文件名称': row['PHYFILENAME'],
    }

    matched_files = find_files_by_type_and_size(
        base_directory, row['FILETYPE'], row['FILESIZE'])

    if not matched_files:
        # No candidate on disk: emit a sentinel record instead of dropping the row.
        return [{**base_record, '文件路径': 'file not found', '文件MD5': '0'}]

    return [
        {**base_record, '文件路径': path, '文件MD5': calculate_md5(path)}
        for path in matched_files
    ]

def save_files_info_to_csv(records, base_directory, output_csv):
    """Resolve every record in parallel and write the results to a CSV file.

    Each input row is processed by process_file_entry on a thread pool
    (the work is I/O-bound: directory walks and file hashing), and the
    flattened results are written to output_csv.

    Args:
        records: DataFrame of input rows (as loaded by read_csv_records).
        base_directory: Root directory searched for matching files.
        output_csv: Path of the CSV file to write.
    """
    data = []
    with ThreadPoolExecutor() as executor:
        futures = [executor.submit(process_file_entry, row, base_directory)
                   for _, row in records.iterrows()]
        # Collect in submission order rather than via as_completed: the
        # original completion-order collection made the output row order
        # nondeterministic run-to-run; this keeps it aligned with the input
        # CSV while still overlapping the I/O-bound work.
        for future in futures:
            data.extend(future.result())

    df = pd.DataFrame(data)

    # Print column names and sample rows for debugging.
    print("DataFrame列名：", df.columns.tolist())
    print("DataFrame 示例数据：", df.head())

    # Persist to the output CSV.
    df.to_csv(output_csv, index=False, header=True)
    print(f"File information saved to {output_csv}")

def _main():
    """Script entry point: parse arguments, load the record CSV and report matches."""
    args = parse_arguments()
    records = read_csv_records(args.csv_file)
    save_files_info_to_csv(records, args.base_directory, args.output_csv)
    print(f"Missing files saved to {args.output_csv}")


if __name__ == '__main__':
    _main()
