import os


def split_large_file(input_file, output_prefix, chunk_size=10 * 1024 * 1024):
    """
    Split a large text file into multiple smaller files.

    Lines are never broken across parts: the current part is flushed as soon
    as its accumulated UTF-8 byte size reaches ``chunk_size``, so each output
    file may exceed ``chunk_size`` by at most one line.

    Args:
        input_file: Path to the large input file (read as UTF-8 text).
        output_prefix: Prefix for the output files; parts are written as
            ``"<output_prefix>_NNN.txt"`` starting at 001. Its directory
            component is created if missing.
        chunk_size: Target size of each part in bytes (default 10 MB).

    Returns:
        None. Prints a message per part created; prints an error and returns
        early if ``input_file`` does not exist.
    """
    if not os.path.exists(input_file):
        print(f"错误: 输入文件 '{input_file}' 不存在")
        return

    # Create the output directory if needed. exist_ok=True avoids the
    # check-then-create race the original exists()+makedirs() pair had.
    output_dir = os.path.dirname(output_prefix)
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)

    part_num = 1        # 1-based index of the next part file
    current_size = 0    # accumulated UTF-8 byte size of buffered lines
    current_lines = []  # lines buffered for the current part

    def _flush():
        # Write the buffered lines as the next numbered part file and
        # report its size. Reads (never rebinds) the enclosing state.
        output_file = f"{output_prefix}_{part_num:03d}.txt"
        with open(output_file, 'w', encoding='utf-8') as f_out:
            f_out.writelines(current_lines)
        print(f"已创建: {output_file} (大小: {current_size / 1024 / 1024:.2f}MB)")

    with open(input_file, 'r', encoding='utf-8') as f_in:
        for line in f_in:
            current_lines.append(line)
            # Count encoded bytes, not characters, so multi-byte UTF-8
            # text is chunked by its actual on-disk size.
            current_size += len(line.encode('utf-8'))

            # Flush once the target size is reached, then reset the buffer.
            if current_size >= chunk_size:
                _flush()
                part_num += 1
                current_size = 0
                current_lines = []

    # Write whatever remains as the final (possibly smaller) part.
    if current_lines:
        _flush()


if __name__ == "__main__":
    # Demo invocation: point these paths at your own large log file and
    # the desired output-file prefix before running.
    source_log = "/Users/daxiang/Downloads/TR_LA_002_cameras.2025-04-08.log"
    destination_prefix = "/Users/daxiang/Downloads/output/split_file"
    split_large_file(source_log, destination_prefix)
