# fast_split_csv.py
import os

def split_file_fast(csv_path, out_dir, lines_per_file=1000000):
    """Split a large text/CSV file into numbered parts.

    Streams *csv_path* line by line and writes consecutive chunks of at
    most *lines_per_file* lines to ``out_dir`` as ``part_000.csv``,
    ``part_001.csv``, ...  Prints a summary with the number of parts.

    Args:
        csv_path: Path of the input file (read as UTF-8 text).
        out_dir: Output directory; created if it does not exist.
        lines_per_file: Maximum number of lines per output part.
    """
    # exist_ok avoids the exists()/makedirs() race and is a no-op if present.
    os.makedirs(out_dir, exist_ok=True)

    file_idx = 0
    lines_written = 0
    # Opened lazily: a part file is only created when there is a line to
    # write, so an input whose length is an exact multiple of
    # lines_per_file (or an empty input) never produces an empty part.
    part_file = None

    def _open_next_part():
        nonlocal file_idx
        part_path = os.path.join(out_dir, f"part_{file_idx:03d}.csv")
        file_idx += 1
        return open(part_path, "w", encoding="utf-8")

    try:
        # Large read buffer (1 MB) to reduce syscall overhead on big files.
        with open(csv_path, "r", encoding="utf-8", buffering=1024 * 1024) as f:
            for line in f:
                if part_file is None:
                    part_file = _open_next_part()
                part_file.write(line)
                lines_written += 1
                if lines_written >= lines_per_file:
                    part_file.close()
                    part_file = None
                    lines_written = 0
    finally:
        # Close the in-progress part even if reading raised mid-loop.
        if part_file:
            part_file.close()
    print(f"✅ 拆分完成，共 {file_idx} 个文件")

if __name__ == "__main__":
    # Hard-coded source file and destination directory for the split parts.
    source_csv = r"D:\data\公开数据集\淘宝用户行为数据\UserBehavior.csv"
    parts_dir = r"D:\data\公开数据集\淘宝用户行为数据\split_parts"
    split_file_fast(source_csv, parts_dir, lines_per_file=1000000)
