# -*- coding: utf-8 -*-

import os
import pandas as pd
import argparse
import hashlib


def parse_arguments():
    """Build and parse the command-line arguments for the restore script."""
    arg_parser = argparse.ArgumentParser(description='Restore files based on CSV records')
    # All three arguments are mandatory string paths; declare them table-driven.
    for flag, help_text in (
        ('--csv_file', 'Input CSV file path with records'),
        ('--base_directory', 'Base directory for file paths'),
        ('--output_csv', 'Output CSV file path for missing files'),
    ):
        arg_parser.add_argument(flag, required=True, help=help_text)
    return arg_parser.parse_args()


def read_csv_records(csv_file):
    """Load the record table from *csv_file* and return it as a DataFrame."""
    print(f"Reading CSV file: {csv_file}")
    records = pd.read_csv(csv_file)
    print(f"CSV file read complete. Total records: {len(records)}")
    return records


def calculate_md5(file_path):
    """Return the hexadecimal MD5 digest of the file at *file_path*.

    The file is read in 4 KiB chunks so arbitrarily large files can be
    hashed without loading them fully into memory.
    """
    digest = hashlib.md5()
    with open(file_path, "rb") as fh:
        while True:
            chunk = fh.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()


def find_files_by_type_and_size(base_directory, file_type, file_size):
    """Recursively search *base_directory* for files with a given suffix and exact size.

    Args:
        base_directory: Root directory to walk.
        file_type: Filename suffix matched with ``str.endswith`` (it may or
            may not include a leading dot, depending on the CSV data).
        file_size: Exact file size in bytes that must match.

    Returns:
        A list of paths (joined from the walk roots) of every matching file.

    Files whose size cannot be read — broken symlinks, permission errors —
    are skipped instead of aborting the whole scan, so one bad directory
    entry no longer kills the entire restore run.
    """
    matched_files = []
    for root, _, files in os.walk(base_directory):
        for name in files:
            if not name.endswith(file_type):
                continue
            file_path = os.path.join(root, name)
            try:
                # os.path.getsize raises OSError for unreadable entries.
                if os.path.getsize(file_path) == file_size:
                    matched_files.append(file_path)
            except OSError:
                continue
    return matched_files


def process_file_entry(row, base_directory, output_csv):
    """Process one CSV record: locate candidate files on disk and append result rows to *output_csv*.

    For every file under *base_directory* whose suffix (FILETYPE) and size
    (FILESIZE) match the record, one output row with the file's path and MD5
    is written. If no file matches, a single placeholder row
    ('file not found' / MD5 '0') is written instead so the record is still
    tracked in the report.

    Args:
        row: A mapping (e.g. pandas Series) providing at least the columns
            ``id``, ``OPERATIONCODE``, ``OPERATIONID``, ``PHYFILENAME``,
            ``FILETYPE`` and ``FILESIZE``.
        base_directory: Root directory searched for matching files.
        output_csv: Path of the CSV the result rows are appended to; the
            header is written only when the file does not exist yet.
    """
    # NOTE: the original code also read ABSOLUTEPATH / SYSFILENAME from the
    # row but never used them; those dead reads have been dropped.
    file_id = row['id']
    op_code = row['OPERATIONCODE']
    op_id = row['OPERATIONID']
    phy_filename = row['PHYFILENAME']

    def _record(file_path, file_md5):
        # One output row; the keys are the (Chinese) column headers of the report.
        return {
            '操作代码': op_code,
            '业务ID': op_id,
            '文件ID': file_id,
            '文件名称': phy_filename,
            '文件路径': file_path,
            '文件MD5': file_md5,
        }

    matched_files = find_files_by_type_and_size(
        base_directory, row['FILETYPE'], row['FILESIZE'])
    if matched_files:
        data = [_record(path, calculate_md5(path)) for path in matched_files]
    else:
        # No match on disk: emit a placeholder row for this record.
        data = [_record('file not found', '0')]

    # Append the rows; write the header only on the first write of this file.
    print(f"正在处理数据：{file_id} 文件标题：{phy_filename}")
    df = pd.DataFrame(data)
    df.to_csv(output_csv, mode='a', header=not os.path.exists(output_csv), index=False)
    print(f"已处理并保存条目：{file_id}")


def save_files_info_to_csv(records, base_directory, output_csv):
    """Process every record row and append its file info to *output_csv*."""
    print("开始处理文件")
    for _index, record in records.iterrows():
        process_file_entry(record, base_directory, output_csv)
    print("所有文件条目已处理")


def main():
    """Script entry point: parse CLI arguments, load records, write the report."""
    args = parse_arguments()

    # Read the input records from the CSV file.
    records = read_csv_records(args.csv_file)

    # Locate each record's file on disk and save the results.
    save_files_info_to_csv(records, args.base_directory, args.output_csv)

    print(f"文件信息已保存到 {args.output_csv}")


if __name__ == '__main__':
    main()
