import os
import shutil

from gxl_ai_utils.utils import utils_file
# Record the script start time so total elapsed time can be reported at the
# end (see the commented-out do_get_elapsed_time call at the bottom of the file).
time_now = utils_file.do_get_now_time()

# for raw data
# NOTE(review): input_shards_path is assigned but never used anywhere in this
# script — presumably left over from an earlier step; confirm before deleting.
input_shards_path = "/home/work_nfs9/wjtian/0107_esc50-allclass_test2.jsonl"
def split_file(input_file, num_parts, output_dir):
    """Split *input_file* into *num_parts* binary chunks inside *output_dir*.

    Part i is written to ``<output_dir>/<basename(input_file)>_<i>.gxl_part``.
    Each part holds ``file_size // num_parts`` bytes, except the last part,
    which absorbs the remainder when the size is not evenly divisible.
    ``combine_files`` is the inverse operation.

    Args:
        input_file: Path of the file to split.
        num_parts: Number of parts to produce (must be >= 1).
        output_dir: Existing directory that receives the part files.

    Raises:
        ValueError: If ``num_parts`` is less than 1 (the old code died with a
            cryptic ``ZeroDivisionError`` instead).
        OSError: If the input file cannot be read or a part cannot be written.
    """
    if num_parts < 1:
        raise ValueError(f"num_parts must be >= 1, got {num_parts}")
    file_size = os.path.getsize(input_file)
    chunk_size = file_size // num_parts
    utils_file.logging_limit_print(f'chunk_size:{chunk_size}')
    base_name = os.path.basename(input_file)  # portable; the old split("/") broke on Windows paths
    copy_buf = 1024 * 1024  # stream in 1 MiB blocks so a multi-GB chunk never sits fully in RAM
    with open(input_file, 'rb') as f:
        for i in range(num_parts):
            part_file = f'{output_dir}/{base_name}_{i}.gxl_part'
            utils_file.logging_limit_print(f'part_file:{part_file}')
            with open(part_file, 'wb') as part:
                if i == num_parts - 1:
                    # Last part: copy everything that is left (handles the remainder).
                    shutil.copyfileobj(f, part, copy_buf)
                else:
                    remaining = chunk_size
                    while remaining > 0:
                        data = f.read(min(copy_buf, remaining))
                        if not data:  # defensive: file shrank underneath us
                            break
                        part.write(data)
                        remaining -= len(data)

def combine_files(output_file, input_files_prefix, num_parts):
    """Concatenate part files back into a single file (inverse of ``split_file``).

    Reads ``f"{input_files_prefix}_{i}.gxl_part"`` for i in 0..num_parts-1, in
    order, and appends their bytes to *output_file*.

    Args:
        output_file: Path of the reassembled file (overwritten if it exists).
        input_files_prefix: Path prefix shared by all part files.
        num_parts: Number of parts to read; must match the split.

    Raises:
        OSError: If any part file is missing or unreadable.
    """
    with open(output_file, 'wb') as out:
        for i in range(num_parts):
            part_file = f"{input_files_prefix}_{i}.gxl_part"
            print(part_file)  # progress trace for this long-running manual step
            with open(part_file, 'rb') as part:
                # Stream in bounded blocks instead of part.read(): a part can be
                # hundreds of MB, and the old code held each one fully in memory.
                shutil.copyfileobj(part, out)

# ---- driver section: manual run-once steps, toggled by (un)commenting ----
input_list_path = utils_file.do_get_fake_file()  # scratch path used for the upload file list
input_file_path = "/home/work_nfs9/wjtian/dataset/speechtag_test_esc50-allclass.tar"
split_files_dir = '/home/work_nfs9/xlgeng/tmp'
utils_file.makedir_sil(split_files_dir)  # presumably mkdir -p style (silent if it exists) — TODO confirm
# Step 1 (source host, already done): split the tar into 100 parts.
# split_file(input_file_path, 100, split_files_dir)
# Step 3 (destination host, currently active): reassemble the 100 uploaded parts.
# NOTE(review): the output path and the part-file prefix are deliberately the
# same string — parts are named "<prefix>_<i>.gxl_part", so writing the
# reassembled tar to "<prefix>" does not collide with any part file.
combine_files("/mnt/sfs/asr/update_data/speechtag_test_esc50-allclass/speechtag_test_esc50-allclass.tar", "/mnt/sfs/asr/update_data/speechtag_test_esc50-allclass/speechtag_test_esc50-allclass.tar", 100)
# Step 2 (source host, already done): upload the part files to the remote machine.
# files_list = utils_file.do_get_list_for_wav_dir(split_files_dir, suffix='.gxl_part', recursive=False)
# utils_file.print_list(files_list)
# utils_file.write_list_to_file(files_list, input_list_path)
# utils_file.do_sync_files_upload_data_multi_thread(
#     file_list_path=input_list_path,
#     username="root",
#     password=os.environ["SYNC_PASSWORD"],  # SECURITY(review): plaintext root password removed from VCS — rotate the old one
#     remote_host="139.210.101.41",
#     remote_dir=f"/mnt/sfs/asr/update_data/speechtag_test_esc50-allclass",
#     num_thread=10  # ~30 h total at this thread count
# )
# esl_time = utils_file.do_get_elapsed_time(time_now)
# utils_file.logging_info(f"Total time: {esl_time} s")