import itertools
import logging
import os

import dask.dataframe as dd
import pandas as pd
# Split a CSV file into chunks of a configurable size and write them to a new directory.
# Required third-party libraries: pandas, dask.
# Configure logging.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

def estimate_row_size(file_path, sample_size=1000, encoding='gbk'):
    """Estimate the average size, in bytes, of one row of *file_path*.

    Reads up to *sample_size* lines and averages their **encoded** byte
    lengths, so multi-byte encodings (gbk, utf-8) are measured correctly.

    Args:
        file_path: Path of the CSV file to sample.
        sample_size: Maximum number of lines to read for the estimate.
        encoding: Text encoding used both to read and to re-encode lines.

    Returns:
        Average bytes per row as a float.

    Raises:
        ValueError: If the file contains no lines at all.
    """
    logging.info(f"估算每行的平均字节数: {file_path}")
    with open(file_path, 'r', encoding=encoding) as file:
        # islice stops cleanly at EOF; the original bare next() raised
        # StopIteration on files shorter than sample_size.
        sample_data = list(itertools.islice(file, sample_size))
        if not sample_data:
            raise ValueError(f"Cannot estimate row size of empty file: {file_path}")
        # Re-encode each line: len(row) counts characters, not bytes,
        # which under-estimates rows containing multi-byte characters.
        total_bytes = sum(len(row.encode(encoding)) for row in sample_data)
        # Divide by the number of lines actually read, not sample_size,
        # so short files are averaged correctly.
        average_row_size = total_bytes / len(sample_data)
        logging.info(f"每行平均字节数: {average_row_size} 字节")
        return average_row_size

def split_csv_by_size_dask(file_path, output_dir, target_chunk_size_mb=200, encoding='gbk', prefix='chunk'):
    """Split a CSV file into chunk files of roughly *target_chunk_size_mb* MB.

    Uses dask to read the CSV partition by partition and accumulates whole
    partitions into a chunk until adding the next one would exceed the
    target size, then writes the chunk as ``<prefix>_<n>.csv`` in
    *output_dir*. Chunk boundaries are partition boundaries, so individual
    files may exceed the target when a single partition is larger than it.

    Args:
        file_path: Path of the source CSV file.
        output_dir: Directory for the output chunk files (created if missing).
        target_chunk_size_mb: Desired size of each chunk file, in MB.
        encoding: Text encoding of the source CSV.
        prefix: File-name prefix for the numbered output chunks.
    """
    # Estimate average bytes per row so the MB target can be converted
    # into a target number of rows.
    average_row_size = estimate_row_size(file_path, encoding=encoding)

    target_chunk_size_bytes = target_chunk_size_mb * 1024 * 1024
    chunk_size = int(target_chunk_size_bytes // average_row_size)
    logging.info(f"计算出的 chunk_size: {chunk_size} 行")

    # exist_ok avoids the race between the existence check and makedirs.
    if not os.path.exists(output_dir):
        os.makedirs(output_dir, exist_ok=True)
        logging.info(f"创建输出目录: {output_dir}")

    logging.info(f"开始读取CSV文件: {file_path}，编码: {encoding}")
    # dask reads lazily; npartitions is decided by dask's default blocksize.
    ddf = dd.read_csv(file_path, encoding=encoding)
    logging.info(f"CSV文件读取完成，分区数: {ddf.npartitions}")

    # Materialize only the per-partition row counts, not the data itself.
    partition_sizes = ddf.map_partitions(len).compute()
    total_rows = sum(partition_sizes)
    logging.info(f"总行数: {total_rows}")

    # Target chunk size expressed in rows.
    target_partition_size = target_chunk_size_bytes // average_row_size
    logging.info(f"每个分区的目标大小: {target_partition_size} 行")

    current_chunk = 0
    current_chunk_size = 0
    current_chunk_data = []

    def _flush():
        """Write the accumulated partitions to the next numbered chunk file."""
        nonlocal current_chunk, current_chunk_size, current_chunk_data
        output_file = os.path.join(output_dir, f"{prefix}_{current_chunk + 1}.csv")
        pd.concat(current_chunk_data).to_csv(output_file, index=False)
        logging.info(f"分区 {current_chunk + 1} 保存完成: {output_file}")
        current_chunk += 1
        current_chunk_size = 0
        current_chunk_data = []

    for i, part in enumerate(ddf.partitions):
        logging.info(f"开始处理分区 {i + 1}/{ddf.npartitions}")
        part_df = part.compute()
        part_size = len(part_df)
        logging.info(f"分区 {i + 1} 行数: {part_size}")

        # Flush BEFORE the chunk would overflow. The emptiness check fixes
        # a crash in the original: pd.concat([]) raises ValueError when the
        # first partition after a flush already exceeds the target.
        if current_chunk_data and current_chunk_size + part_size > target_partition_size:
            _flush()

        current_chunk_data.append(part_df)
        current_chunk_size += part_size

    # Write whatever rows remain after the loop.
    if current_chunk_data:
        _flush()

# Example invocation. The __main__ guard keeps the split (and its
# filesystem side effects on hard-coded paths) from running on import.
if __name__ == "__main__":
    file_path = r"D:\E\桌面备份\1_0602.csv"
    output_dir = r"D:\E\桌面备份\1_0602"
    target_chunk_size_mb = 200  # target size of each chunk file (MB)
    encoding = 'gbk'  # source-file encoding
    prefix = 'tc_sal_orderdtlnsz'  # custom output-file prefix

    split_csv_by_size_dask(file_path, output_dir, target_chunk_size_mb, encoding, prefix)

    logging.info(
        f"成功将CSV文件分割为多个文件，每个文件大小在 {target_chunk_size_mb}MB 以内，保存在：{output_dir}"
    )
