#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
CSV文件分片工具
按行数分片，保持数据完整性，每个分片保留CSV头部信息
目标：将大CSV文件分片为指定大小的小文件
"""

import csv
import os
import sys
from pathlib import Path
from typing import List, Tuple, Optional


class CSVSharding:
    """Split a large CSV file into several smaller, size-bounded shards.

    Every shard repeats the original header row, so each output file is a
    valid, self-contained CSV.  Shard boundaries are decided from a sampled
    average row size plus a running byte count, so the actual shard sizes
    are approximate (csv quoting overhead is not accounted for).
    """

    def __init__(self, target_size_mb: float = 10.0):
        """
        Initialize the sharder.

        Args:
            target_size_mb: Target shard size in MB (default 10 MB).
        """
        self.target_size_mb = target_size_mb
        self.target_size_bytes = int(target_size_mb * 1024 * 1024)

    def estimate_rows_per_shard(self, file_path: str) -> Tuple[int, int, float]:
        """
        Estimate how many data rows each shard should contain.

        Samples up to the first 1000 data rows to compute an average
        UTF-8-encoded row size, then derives the per-shard row budget.

        Args:
            file_path: Path of the CSV file to analyze.

        Returns:
            tuple: (rows per shard, estimated total rows, average row size in bytes)
        """
        print(f"正在分析文件: {file_path}")

        file_size = os.path.getsize(file_path)
        print(f"文件大小: {file_size / (1024*1024):.2f} MB")

        # Sample the first 1000 rows to estimate the average row size.
        sample_size = 0
        sample_rows = 0

        with open(file_path, 'r', encoding='utf-8', newline='') as file:
            reader = csv.reader(file)

            # Skip the header; remember its encoded size for the byte budget.
            header = next(reader, None)
            if header:
                header_size = len(','.join(header).encode('utf-8')) + 1  # +1 for newline
            else:
                header_size = 0

            for i, row in enumerate(reader):
                if i >= 1000:
                    break
                # NOTE: ','.join ignores csv quoting, so rows containing
                # commas/quotes are slightly undercounted — estimate only.
                row_text = ','.join(row) + '\n'
                sample_size += len(row_text.encode('utf-8'))
                sample_rows += 1

        # Extrapolate the total row count from the sample.
        if sample_rows > 0:
            avg_row_size = sample_size / sample_rows
            total_data_size = file_size - header_size
            estimated_total_rows = int(total_data_size / avg_row_size)
        else:
            avg_row_size = 100  # fallback when the file has no data rows
            estimated_total_rows = 1000

        # Per-shard row budget; keep 1 KB of headroom per output file.
        max_data_per_shard = self.target_size_bytes - header_size - 1024
        rows_per_shard = max(1, int(max_data_per_shard / avg_row_size))

        print(f"估算总行数: {estimated_total_rows:,}")
        print(f"平均行大小: {avg_row_size:.1f} 字节")
        print(f"每分片预计行数: {rows_per_shard:,}")

        return rows_per_shard, estimated_total_rows, avg_row_size

    def split_csv(self, input_file: str, output_dir: Optional[str] = None) -> List[str]:
        """
        Split a CSV file into shards.

        Args:
            input_file: Path of the CSV file to split.
            output_dir: Output directory; defaults to the input file's directory.
                Created (with parents) if it does not exist.

        Returns:
            list: Paths of the shard files that were written.

        Raises:
            FileNotFoundError: If *input_file* does not exist.
            ValueError: If the CSV file has no header row.
        """
        input_path = Path(input_file)
        if not input_path.exists():
            raise FileNotFoundError(f"文件不存在: {input_file}")

        # Resolve the output directory.
        if output_dir is None:
            output_dir = input_path.parent
        else:
            output_dir = Path(output_dir)
            output_dir.mkdir(parents=True, exist_ok=True)

        # Estimate sharding parameters from a sample of the input.
        rows_per_shard, total_rows, avg_row_size = self.estimate_rows_per_shard(input_file)

        output_files: List[str] = []
        current_shard = 1
        current_row_count = 0
        current_file_size = 0

        # Shard files are named <stem>_partNNN<suffix>.
        base_name = input_path.stem
        extension = input_path.suffix

        print("\n开始分片处理...")

        with open(input_file, 'r', encoding='utf-8', newline='') as infile:
            reader = csv.reader(infile)

            header = next(reader, None)
            if not header:
                raise ValueError("CSV文件没有表头")

            header_text = ','.join(header) + '\n'
            header_size = len(header_text.encode('utf-8'))

            current_outfile = None
            current_writer = None
            current_output_path = None

            try:
                for row_index, row in enumerate(reader):
                    # Open a new shard when none is open yet, or when the
                    # current one has hit its row or byte budget.
                    if (current_outfile is None or
                            current_row_count >= rows_per_shard or
                            current_file_size >= self.target_size_bytes):

                        # Close the previous shard and report its real size.
                        if current_outfile:
                            current_outfile.close()
                            current_outfile = None
                            actual_size = os.path.getsize(current_output_path)
                            print(f"  完成分片 {current_shard-1}: {actual_size/(1024*1024):.2f} MB, {current_row_count:,} 行")

                        current_output_path = output_dir / f"{base_name}_part{current_shard:03d}{extension}"
                        current_outfile = open(current_output_path, 'w', encoding='utf-8', newline='')
                        current_writer = csv.writer(current_outfile)

                        # Every shard starts with the header row.
                        current_writer.writerow(header)
                        current_file_size = header_size
                        current_row_count = 0

                        output_files.append(str(current_output_path))
                        print(f"  创建分片 {current_shard}: {current_output_path.name}")
                        current_shard += 1

                    current_writer.writerow(row)
                    # Approximate byte accounting (ignores csv quoting).
                    row_text = ','.join(row) + '\n'
                    current_file_size += len(row_text.encode('utf-8'))
                    current_row_count += 1

                    # Progress report every 10k rows.
                    if (row_index + 1) % 10000 == 0:
                        progress = (row_index + 1) / total_rows * 100 if total_rows > 0 else 0
                        print(f"  处理进度: {row_index + 1:,} 行 ({progress:.1f}%)")
            finally:
                # Always release the open shard handle, even if the loop
                # raised (malformed CSV, disk error, ...).
                if current_outfile:
                    current_outfile.close()

            # Report the final shard (only reached when no exception occurred).
            if current_output_path is not None:
                actual_size = os.path.getsize(current_output_path)
                print(f"  完成分片 {current_shard-1}: {actual_size/(1024*1024):.2f} MB, {current_row_count:,} 行")

        print("\n分片完成！")
        print(f"总共生成 {len(output_files)} 个分片文件:")

        total_output_size = 0.0
        for i, file_path in enumerate(output_files, 1):
            size_mb = os.path.getsize(file_path) / (1024 * 1024)
            total_output_size += size_mb
            print(f"  {i}. {Path(file_path).name} - {size_mb:.2f} MB")

        print(f"输出总大小: {total_output_size:.2f} MB")

        return output_files


def main():
    """Command-line entry point: parse argv, run the sharder, report result."""
    args = sys.argv[1:]
    if not args:
        # No input file given: show usage and exit with an error status.
        print("用法: python csv_sharding.py <CSV文件路径> [目标大小MB] [输出目录]")
        print("示例: python csv_sharding.py data.csv 10 ./output")
        sys.exit(1)

    input_file = args[0]
    target_size = 10.0 if len(args) < 2 else float(args[1])
    output_dir = args[2] if len(args) >= 3 else None

    try:
        sharding = CSVSharding(target_size_mb=target_size)
        output_files = sharding.split_csv(input_file, output_dir)

        print(f"\n✅ 分片成功完成!")
        print(f"生成了 {len(output_files)} 个分片文件")

    except Exception as exc:
        print(f"❌ 错误: {exc}")
        sys.exit(1)


if __name__ == "__main__":
    main()
