import argparse
import filecmp
import hashlib
import os
import shutil

from gxl_ai_utils.utils import utils_file


# ---- CLI arguments -------------------------------------------------------
# The copy workload is sharded across nodes: each node takes one slice of
# the file list, selected by --node_id out of --num_nodes.
parser = argparse.ArgumentParser()
parser.add_argument('--num_nodes', type=int, help='total number of nodes sharing the copy job')
parser.add_argument('--node_id', type=int, help='0-based index of this node (selects its shard of the file list)')
args = parser.parse_args()
num_nodes = args.num_nodes
node_id = args.node_id


# ---- Input / output paths ------------------------------------------------
# file_list_path: text file with one parquet path per line.
# NOTE(review): the first assignment below was dead code (immediately
# overwritten by the next line); kept as a comment for reference.
# file_list_path = "/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/data/8W_asr_data_hq/parquet.list"
file_list_path = "/mnt/apdcephfs_sgfd/share_304127040/Tealab/user/xuelonggeng/data/en_codeswitch_hq/parquet.list"
# output_dir = "/mnt/apdcephfs_sgfd/share_304127040/Tealab/user/xuelonggeng/data/8W_asr_data_hq/parquet"
output_dir = "/apdcephfs_zwfy2/share_303841515/Tealab/data/asr/en_codeswitch_hq/parquet"
utils_file.makedir_sil(output_dir)

# The duplicate mid-script imports of os/shutil were removed (already
# imported at the top of the file); hashlib/filecmp now live in the
# top-of-file import block as well.

def files_identical(src: str, dst: str, method: str = "auto", bufsize: int = 4 * 1024 * 1024) -> bool:
    """Return True iff the contents of *src* and *dst* are identical.

    A size comparison runs first: a missing *dst* or differing file sizes
    immediately report the files as different, regardless of *method*.

    Args:
        src: path to the source file (assumed to exist).
        dst: path to the destination file (may be missing).
        method:
            - "auto": prefer ``filecmp.cmp``'s chunk-wise deep compare
              (cheaper than hashing whole files, stops at first difference);
              fall back to "hash" if it raises.
            - "hash": compare blake2b digests of both files.
            - "filesize": compare file sizes only — no content check.
            Any other value falls through to a plain chunk-wise byte compare.
        bufsize: read-chunk size in bytes for the hash / byte comparisons.

    Returns:
        True when the files compare equal under the chosen method.
    """
    if not os.path.exists(dst):
        return False

    if os.stat(src).st_size != os.stat(dst).st_size:
        return False

    if method == "filesize":
        # Bug fix: the docstring promised a size-only check for this mode,
        # but the old code fell through to a full byte compare. The sizes
        # already match at this point, so this mode reports identical.
        return True

    if method == "auto":
        try:
            # Deep compare (shallow=False): chunk-by-chunk, returns as soon
            # as a difference is found.
            return filecmp.cmp(src, dst, shallow=False)
        except Exception:
            method = "hash"  # fall back to hashing

    if method == "hash":
        h1 = hashlib.blake2b(digest_size=32)
        h2 = hashlib.blake2b(digest_size=32)
        with open(src, "rb") as f1, open(dst, "rb") as f2:
            while True:
                b1 = f1.read(bufsize)
                b2 = f2.read(bufsize)
                if not b1 and not b2:
                    break
                h1.update(b1)
                h2.update(b2)
        return h1.digest() == h2.digest()

    # Fallback: chunk-wise byte compare without hashing.
    with open(src, "rb") as f1, open(dst, "rb") as f2:
        while True:
            b1 = f1.read(bufsize)
            b2 = f2.read(bufsize)
            if b1 != b2:
                return False
            if not b1:  # both streams at EOF
                return True

# Load the full list of parquet file paths, split it into num_nodes shards,
# and keep only the shard assigned to this node.
file_list = utils_file.load_list_file_clean(file_list_path)
file_list_list4node = utils_file.do_split_list(file_list, num_nodes)
file_list_now = file_list_list4node[node_id]
# Pool of 10 worker processes for the copy work on this node.
runner =utils_file.GXLMultiprocessingWithReturn(num_processes=10)
def little_func(file_list_little):
    """Copy each parquet file in *file_list_little* into ``output_dir``.

    Destinations whose content already matches the source are skipped.
    Individual copy failures are logged and do not abort the batch.

    Args:
        file_list_little: list of source file paths handled by one worker.
    """
    for src_path in utils_file.tqdm(file_list_little, total=len(file_list_little), desc="copy_parquet"):
        dst_path = output_dir + "/" + src_path.split("/")[-1]
        # files_identical() already returns False when dst_path is missing,
        # so the previous separate os.path.exists() check was redundant.
        if files_identical(src_path, dst_path):
            utils_file.logging_limit_print(f'Skipping {dst_path}, already exists')
            continue
        try:
            # copy2 preserves metadata such as mtime.
            shutil.copy2(str(src_path), str(dst_path))
        except Exception as e:
            # Bug fix: the old message claimed "not found" for every failure;
            # report the actual exception instead.
            print(f"Error copying {src_path}: {e}")
            continue

# Dispatch this node's shard to the worker pool; presumably the runner splits
# file_list_now into sub-lists, one per process — confirm in GXLMultiprocessingWithReturn.
runner.run(little_func, file_list_now)

