"""
综合示例脚本
演示如何使用pandas处理大型CSV文件的常见任务
"""

import pandas as pd
import os


def example_1_basic_info(csv_path):
    """
    Example 1: show basic CSV info by reading only a small sample.

    Args:
        csv_path: path of the CSV file to inspect.
    """
    print("\n" + "="*50)
    print("示例1: 获取CSV基本信息")
    print("="*50)

    # A 100-row sample is enough to inspect columns and dtypes quickly
    # without loading the whole (potentially huge) file.
    sample = pd.read_csv(csv_path, nrows=100, encoding='utf-8')

    print(f"\n列名 ({len(sample.columns)} 列):")
    for idx, name in enumerate(sample.columns, 1):
        print(f"  {idx}. {name}")

    print("\n数据类型:")
    print(sample.dtypes)

    print("\n前5行数据:")
    print(sample.head())


def example_2_count_rows(csv_path):
    """
    Example 2: count total rows using chunked reads (memory-friendly).

    Args:
        csv_path: path of the CSV file to count.

    Returns:
        Total number of data rows (header excluded).
    """
    print("\n" + "="*50)
    print("示例2: 统计总行数")
    print("="*50)

    chunk_size = 10000
    total_rows = 0

    print("正在统计...")
    reader = pd.read_csv(csv_path, chunksize=chunk_size, encoding='utf-8')
    for part in reader:
        total_rows += len(part)
        # Progress line every 100k rows (hit only when chunks align exactly).
        if total_rows % 100000 == 0:
            print(f"  已统计: {total_rows:,} 行")

    print(f"\n总行数: {total_rows:,}")
    return total_rows


def example_3_filter_data(csv_path, output_path):
    """
    Example 3: filter rows chunk by chunk and stream results to a new CSV.

    Args:
        csv_path: path of the input CSV file.
        output_path: path of the filtered output CSV file.
    """
    print("\n" + "="*50)
    print("示例3: 过滤数据")
    print("="*50)

    chunk_size = 10000
    kept = 0

    print("正在过滤数据...")

    reader = pd.read_csv(csv_path, chunksize=chunk_size, encoding='utf-8')
    for chunk_no, chunk in enumerate(reader, 1):
        # Example filter: drop rows where every column is NaN.
        # Adapt to the real data, e.g. chunk[chunk['some_col'] > threshold].
        subset = chunk.dropna(how='all')

        # First chunk creates the file and writes the header;
        # later chunks append data only.
        is_first = chunk_no == 1
        subset.to_csv(
            output_path,
            mode='w' if is_first else 'a',
            header=is_first,
            index=False,
            encoding='utf-8',
        )

        kept += len(subset)

        if chunk_no % 10 == 0:
            print(f"  已处理: {chunk_no * chunk_size:,} 行, 保留: {kept:,} 行")

    print(f"\n完成! 共保留 {kept:,} 行")
    print(f"输出文件: {output_path}")


def example_4_column_statistics(csv_path):
    """
    Example 4: per-column missing-value statistics, computed in chunks.

    Prints, for each column, the number and percentage of missing values,
    then lists the columns with fewer than 5% missing values.

    Args:
        csv_path: path of the CSV file to scan.
    """
    print("\n" + "="*50)
    print("示例4: 列统计信息")
    print("="*50)

    chunk_size = 10000

    # Read one chunk only to learn the column names.
    first_chunk = pd.read_csv(csv_path, nrows=chunk_size, encoding='utf-8')
    columns = first_chunk.columns.tolist()

    # Per-column running totals: rows seen and NaN cells seen.
    stats = {col: {'count': 0, 'missing': 0} for col in columns}

    print("正在统计...")

    for chunk in pd.read_csv(csv_path, chunksize=chunk_size, encoding='utf-8'):
        # Vectorized NaN count for the whole chunk — one C-level pass
        # instead of one Series scan per column.
        missing_per_col = chunk.isna().sum()
        for col in columns:
            stats[col]['count'] += len(chunk)
            stats[col]['missing'] += int(missing_per_col[col])

    print("\n缺失值统计:")
    for col, stat in stats.items():
        # Guard against a header-only CSV: count stays 0 and the
        # original code raised ZeroDivisionError here.
        missing_pct = (stat['missing'] / stat['count']) * 100 if stat['count'] else 0.0
        if stat['missing'] > 0:
            print(f"  {col}: {stat['missing']:,} ({missing_pct:.2f}%)")

    print("\n完整性较好的列 (缺失值<5%):")
    for col, stat in stats.items():
        missing_pct = (stat['missing'] / stat['count']) * 100 if stat['count'] else 0.0
        if missing_pct < 5:
            print(f"  {col}: {missing_pct:.2f}%")

def example_5_unique_values(csv_path, column_name):
    """
    Example 5: count the unique values of one column, streaming in chunks.

    Prints the number of unique (non-NaN) values; lists them all when there
    are at most 20, otherwise shows the 10 most frequent ones.

    Args:
        csv_path: path of the CSV file to scan.
        column_name: name of the column to analyze.
    """
    print("\n" + "="*50)
    print(f"示例5: 统计 '{column_name}' 列的唯一值")
    print("="*50)

    chunk_size = 10000
    value_counts = {}

    print("正在统计...")

    try:
        for chunk in pd.read_csv(csv_path, chunksize=chunk_size, encoding='utf-8'):
            # Fix: the original silently skipped missing columns and then
            # reported "0 unique values"; raising makes the error branch
            # below (which already mentions a missing column) accurate.
            if column_name not in chunk.columns:
                raise KeyError(column_name)
            # Vectorized per-chunk frequency count instead of a Python-level
            # loop over every cell; unique values are the dict keys.
            for value, count in chunk[column_name].value_counts(dropna=True).items():
                value_counts[value] = value_counts.get(value, 0) + int(count)

        print(f"\n唯一值数量: {len(value_counts):,}")

        if len(value_counts) <= 20:
            print("\n所有唯一值:")
            for val in sorted(value_counts):
                print(f"  - {val}: {value_counts[val]:,} 次")
        else:
            print("\n最常见的10个值:")
            top_10 = sorted(value_counts.items(), key=lambda x: x[1], reverse=True)[:10]
            for val, count in top_10:
                print(f"  - {val}: {count:,} 次")

    except Exception as e:
        print(f"错误: {e}")
        print(f"可能原因: 列名 '{column_name}' 不存在")

def example_6_data_aggregation(csv_path):
    """
    Example 6: data-aggregation (group-by) starter.

    Reads a 1000-row sample, prints its describe() summary, and suggests
    group-by patterns for the user to adapt to the real column names.

    Args:
        csv_path: path of the CSV file to sample.
    """
    print("\n" + "="*50)
    print("示例6: 数据聚合")
    print("="*50)

    # (Removed an unused `chunk_size` local left over from the other
    # examples — this one only reads a bounded sample.)
    print("读取样本数据...")
    sample = pd.read_csv(csv_path, nrows=1000, encoding='utf-8')

    print("\n数据概览:")
    print(sample.describe())

    print("\n提示: 根据你的实际列名修改代码进行聚合")
    print("例如:")
    print("  - df.groupby('类别列').size()")
    print("  - df.groupby('分组列')['数值列'].mean()")

def main():
    """主函数"""
    
    csv_files = [
        "output/基本设施.csv",
        "output/设施集合.csv"
    ]
    
    print("="*50)
    print("CSV处理示例脚本")
    print("="*50)
    
    print("\n可用的CSV文件:")
    for i, file in enumerate(csv_files, 1):
        if os.path.exists(file):
            size_mb = os.path.getsize(file) / (1024 * 1024)
            print(f"{i}. {file} ({size_mb:.2f} MB)")
    
    print("\n可用示例:")
    print("1. 获取基本信息")
    print("2. 统计总行数")
    print("3. 过滤数据并导出")
    print("4. 列统计信息")
    print("5. 统计某列唯一值")
    print("6. 数据聚合示例")
    print("7. 运行所有示例")
    print("0. 退出")
    
    try:
        choice = int(input("\n选择示例 (0-7): "))
        
        if choice == 0:
            return
        
        file_idx = int(input("选择文件 (1-2): "))
        
        if 1 <= file_idx <= len(csv_files):
            csv_path = csv_files[file_idx - 1]
            
            if choice == 1:
                example_1_basic_info(csv_path)
            
            elif choice == 2:
                example_2_count_rows(csv_path)
            
            elif choice == 3:
                output_path = "output/filtered_data.csv"
                example_3_filter_data(csv_path, output_path)
            
            elif choice == 4:
                example_4_column_statistics(csv_path)
            
            elif choice == 5:
                # 先获取列名
                df_temp = pd.read_csv(csv_path, nrows=1, encoding='utf-8')
                print("\n可用的列:")
                for i, col in enumerate(df_temp.columns, 1):
                    print(f"{i}. {col}")
                
                col_idx = int(input("\n选择列 (输入序号): "))
                if 1 <= col_idx <= len(df_temp.columns):
                    column_name = df_temp.columns[col_idx - 1]
                    example_5_unique_values(csv_path, column_name)
            
            elif choice == 6:
                example_6_data_aggregation(csv_path)
            
            elif choice == 7:
                # 运行所有示例
                example_1_basic_info(csv_path)
                example_2_count_rows(csv_path)
                example_4_column_statistics(csv_path)
                example_6_data_aggregation(csv_path)
                
                print("\n\n所有示例运行完成!")
    
    except ValueError:
        print("请输入有效的数字")
    except KeyboardInterrupt:
        print("\n\n程序已终止")
    except Exception as e:
        print(f"\n错误: {e}")


if __name__ == "__main__":
    main()

