import json
from concurrent.futures import ProcessPoolExecutor
from tqdm import tqdm

def process_line(line):
    """
    Process a single JSONL record.

    Parses *line* as JSON and, as an example transformation, extracts the
    'key' field; adapt the logic to your actual needs.

    Returns a newline-terminated JSON string, or None when the line is not
    valid JSON (callers are expected to skip None results, or log them).
    """
    try:
        record = json.loads(line)
    except json.JSONDecodeError:
        # Malformed line: signal the caller to skip it (or log an error).
        return None
    # Example transformation: pull out the 'key' field.
    return json.dumps({"processed": record.get("key")}) + "\n"

def process_file(input_file, output_file, process_func, workers=4, chunk_size=1000):
    """
    Process a very large JSONL file in parallel, batch by batch.

    Parameters:
      input_file  : path to the input JSONL file
      output_file : path where processed results are written
      process_func: callable applied to each input line; returns a string
                    to write, or None to skip that line
      workers     : number of worker processes (tune to CPU core count)
      chunk_size  : lines submitted to the pool per batch; tune according
                    to memory and per-line processing cost
    """
    def _flush(executor, fout, chunk, progress_bar):
        # Process one batch in parallel and write the non-None results.
        for result in executor.map(process_func, chunk):
            if result is not None:
                fout.write(result)
        # Advance the bar while the chunk is still populated. (The original
        # code cleared the chunk first, so update() was always called with 0
        # and the bar never moved for full batches.)
        progress_bar.update(len(chunk))

    # Count lines up-front so the progress bar shows an accurate total.
    # This costs one extra sequential pass over the file; previously the
    # total was hardcoded to 1000, which was wrong for any real input.
    with open(input_file, "r", buffering=1024 * 1024) as f:
        total_lines = sum(1 for _ in f)

    with open(input_file, "r", buffering=1024 * 1024) as fin, \
         open(output_file, "w", buffering=1024 * 1024) as fout, \
         ProcessPoolExecutor(max_workers=workers) as executor, \
         tqdm(total=total_lines, desc="Processing Lines", unit="lines") as progress_bar:
        lines_chunk = []
        for line in fin:
            lines_chunk.append(line)
            if len(lines_chunk) >= chunk_size:
                _flush(executor, fout, lines_chunk, progress_bar)
                lines_chunk = []
        # Flush the final partial batch, if any.
        if lines_chunk:
            _flush(executor, fout, lines_chunk, progress_bar)

if __name__ == "__main__":
    # 根据需要设置输入、输出文件路径以及并行参数
    input_path = "input.jsonl"
    output_path = "output.jsonl"
    process_file(input_path, output_path, workers=8, chunk_size=5000)
