"""
使用Pandas分块处理大型CSV文件
不依赖AI，纯数据处理
"""

import pandas as pd
import os
from datetime import datetime


def process_csv_in_chunks(csv_path, chunk_size=10000):
    """
    Process a large CSV file chunk by chunk to keep memory usage bounded.

    Reads the file with a pandas chunk iterator, records column names and
    dtypes from the first chunk, and accumulates per-column missing-value
    counts across all chunks.

    Args:
        csv_path: Path to the CSV file.
        chunk_size: Number of rows read per chunk.

    Returns:
        dict with keys:
            'total_rows'     - total number of data rows processed (int)
            'columns'        - list of column names
            'missing_values' - per-column missing counts as plain ints
            'data_types'     - dtype of each column (from the first chunk)
        or None if reading/processing failed.
    """
    
    print(f"开始处理文件: {csv_path}")
    print(f"分块大小: {chunk_size} 行")
    print("-" * 50)
    
    total_rows = 0
    chunk_count = 0
    
    try:
        # Iterator-based read: only one chunk is in memory at a time.
        chunk_iterator = pd.read_csv(
            csv_path, 
            chunksize=chunk_size,
            encoding='utf-8',
            low_memory=False
        )
        
        # Accumulated statistics returned to the caller.
        stats = {
            'total_rows': 0,
            'columns': [],
            'missing_values': {},
            'data_types': {}
        }
        
        for i, chunk in enumerate(chunk_iterator, 1):
            chunk_count = i
            total_rows += len(chunk)
            
            # Column names / dtypes are taken from the first chunk only;
            # pandas infers dtypes per chunk, so later chunks could differ.
            if i == 1:
                stats['columns'] = list(chunk.columns)
                stats['data_types'] = chunk.dtypes.to_dict()
                print(f"\n列信息 ({len(chunk.columns)} 列):")
                for col in chunk.columns:
                    print(f"  - {col}: {chunk[col].dtype}")
                print("\n" + "-" * 50)
            
            # One vectorized pass counts missing values for every column
            # at once; int() converts numpy integers to plain Python ints
            # so the returned stats are JSON-serializable.
            for col, missing in chunk.isna().sum().items():
                stats['missing_values'][col] = (
                    stats['missing_values'].get(col, 0) + int(missing)
                )
            
            # Progress report every 10 chunks.
            if i % 10 == 0:
                print(f"已处理: {total_rows:,} 行 ({i} 个块)")
            
            # 这里可以添加你的数据处理逻辑
            # 例如: 过滤、转换、聚合等
            # processed_chunk = process_chunk(chunk)
        
        stats['total_rows'] = total_rows
        
        print("\n" + "=" * 50)
        print("处理完成!")
        print("=" * 50)
        print(f"\n总计处理:")
        print(f"  - 数据块数: {chunk_count}")
        print(f"  - 总行数: {total_rows:,}")
        print(f"  - 总列数: {len(stats['columns'])}")
        
        print(f"\n缺失值统计:")
        for col, missing_count in stats['missing_values'].items():
            # missing_count > 0 implies at least one row was read,
            # so total_rows is non-zero here.
            if missing_count > 0:
                percentage = (missing_count / total_rows) * 100
                print(f"  - {col}: {missing_count:,} ({percentage:.2f}%)")
        
        return stats
        
    except Exception as e:
        # Broad catch is deliberate: this is a top-level utility boundary;
        # report the error and signal failure with None.
        print(f"处理文件时出错: {e}")
        return None


def analyze_csv_structure(csv_path, sample_size=1000):
    """
    Quickly inspect the structure of a CSV file from a row sample.

    Reads at most `sample_size` rows, then prints basic info, the file
    size on disk, column dtypes, a preview, and a statistical summary.

    Args:
        csv_path: Path to the CSV file.
        sample_size: Maximum number of rows to sample.

    Returns:
        The sampled DataFrame, or None if reading failed.
    """
    
    print(f"分析文件结构: {csv_path}")
    print("-" * 50)
    
    try:
        # Only a prefix of the file is read, so this stays fast
        # even for very large files.
        sample = pd.read_csv(csv_path, nrows=sample_size, encoding='utf-8')
        
        size_mb = os.path.getsize(csv_path) / (1024 * 1024)
        row_count, col_count = len(sample), len(sample.columns)
        
        print(f"\n基本信息:")
        print(f"  - 采样行数: {row_count}")
        print(f"  - 列数: {col_count}")
        print(f"  - 文件大小: {size_mb:.2f} MB")
        
        print(f"\n数据类型:")
        print(sample.dtypes)
        
        print(f"\n数据概览:")
        print(sample.head(10))
        
        print(f"\n统计摘要:")
        print(sample.describe())
        
        return sample
        
    except Exception as e:
        # Report and signal failure rather than propagating.
        print(f"分析文件时出错: {e}")
        return None


def filter_and_export(csv_path, output_path, filter_func=None, chunk_size=10000):
    """
    Filter a CSV file chunk by chunk and export the result.

    Reads the input in chunks, applies `filter_func` (if given) to each
    chunk, and appends the surviving rows to the output file. The header
    is written once, with the first chunk.

    Args:
        csv_path: Input CSV path.
        output_path: Output CSV path.
        filter_func: Callable taking a DataFrame and returning a boolean
            mask (anything usable inside `chunk[...]`). None keeps all rows.
        chunk_size: Number of rows read per chunk.

    Returns:
        Total number of rows written (int) on success, None on failure.
        (Previously the function always returned None; returning the
        count is backward-compatible for callers that ignored it.)
    """
    
    print(f"过滤并导出数据...")
    print(f"输入: {csv_path}")
    print(f"输出: {output_path}")
    
    first_chunk = True
    total_rows_written = 0
    
    try:
        chunk_iterator = pd.read_csv(
            csv_path,
            chunksize=chunk_size,
            encoding='utf-8',
            low_memory=False
        )
        
        for chunk in chunk_iterator:
            # Apply the caller-supplied row filter, if any.
            if filter_func:
                filtered_chunk = chunk[filter_func(chunk)]
            else:
                filtered_chunk = chunk
            
            # First chunk truncates the output and writes the header;
            # subsequent chunks append without a header.
            filtered_chunk.to_csv(
                output_path,
                mode='w' if first_chunk else 'a',
                header=first_chunk,
                index=False,
                encoding='utf-8'
            )
            
            total_rows_written += len(filtered_chunk)
            first_chunk = False
        
        print(f"\n完成! 共写入 {total_rows_written:,} 行")
        return total_rows_written
        
    except Exception as e:
        # Broad catch is deliberate at this utility boundary:
        # report the error and signal failure with None.
        print(f"过滤导出时出错: {e}")
        return None


def main():
    """Interactive entry point: list available CSVs and dispatch the chosen action."""
    
    csv_files = [
        "output/基本设施.csv",
        "output/设施集合.csv"
    ]
    
    print("=" * 50)
    print("大型CSV文件处理工具")
    print("=" * 50)
    
    # List only files that actually exist, keeping their menu numbers
    # tied to their position in csv_files.
    print("\n可用的CSV文件:")
    for idx, path in enumerate(csv_files, 1):
        if not os.path.exists(path):
            continue
        size_mb = os.path.getsize(path) / (1024 * 1024)
        print(f"{idx}. {path} ({size_mb:.2f} MB)")
    
    print("\n操作选项:")
    print("1. 快速分析文件结构")
    print("2. 分块处理整个文件")
    print("3. 退出")
    
    try:
        operation = int(input("\n选择操作 (1-3): "))
        if operation == 3:
            return
        
        file_choice = int(input("选择文件 (1-2): "))
        if not (1 <= file_choice <= len(csv_files)):
            print("无效的文件选择")
            return
        
        selected_file = csv_files[file_choice - 1]
        if operation == 1:
            analyze_csv_structure(selected_file)
        elif operation == 2:
            process_csv_in_chunks(selected_file)
            
    except ValueError:
        # Non-numeric input from either prompt.
        print("请输入有效的数字")
    except KeyboardInterrupt:
        print("\n\n程序已终止")


# Run the interactive menu only when executed as a script (not on import).
if __name__ == "__main__":
    main()

