#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
自定义批量处理示例
演示如何使用CustomBatchHandler处理10万级别数据
支持自定义SQL、分批处理、批量写入、多线程并发
"""

import asyncio
import sys
import os
from datetime import datetime, timedelta
from typing import Dict, Any
import polars as pl

# 添加项目根目录到Python路径
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from src.custom_batch_handler import (
    CustomBatchHandler, 
    custom_batch_sync_task, 
    sync_custom_batch_task
)
from loguru import logger


def setup_logging():
    """Route loguru output to stdout (INFO, colored) and a rotating daily file (DEBUG)."""
    stdout_fmt = "<green>{time:YYYY-MM-DD HH:mm:ss}</green> | <level>{level: <8}</level> | <cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>"
    file_fmt = "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {name}:{function}:{line} - {message}"

    # Drop loguru's default stderr sink before installing our own.
    logger.remove()
    logger.add(sys.stdout, format=stdout_fmt, level="INFO")
    # File sink: one file per day, kept for a week.
    logger.add(
        "logs/custom_batch_{time:YYYY-MM-DD}.log",
        format=file_fmt,
        level="DEBUG",
        rotation="1 day",
        retention="7 days",
    )


def custom_data_transform(df: pl.DataFrame) -> pl.DataFrame:
    """
    Example per-batch transform: stamp, clean and enrich a batch of rows.

    Args:
        df: raw batch as a polars DataFrame.

    Returns:
        The transformed DataFrame; on any failure the input is returned
        as-is (best-effort semantics — the error is logged, not raised).
    """
    try:
        # Stamp every row with the wall-clock processing time.
        df = df.with_columns([
            pl.lit(datetime.now().strftime('%Y-%m-%d %H:%M:%S')).alias('processed_at')
        ])

        if 'amount' in df.columns:
            df = df.with_columns([
                # Clamp negative amounts to zero.
                pl.when(pl.col('amount') < 0).then(0).otherwise(pl.col('amount')).alias('amount'),
                # 13%-tax-inclusive amount. NOTE(review): both expressions in
                # one with_columns call see the input frame, so this is based
                # on the original (pre-clamp) amount — confirm that is intended.
                (pl.col('amount') * 1.13).alias('amount_with_tax')
            ])

        if 'status' in df.columns:
            df = df.with_columns([
                # BUG FIX: polars' string namespace has no `upper()`; the
                # method is `to_uppercase()`. The old `str.upper()` raised
                # AttributeError, which the except below swallowed — silently
                # skipping this step and the create_time step.
                pl.col('status').str.to_uppercase().alias('status')
            ])

        if 'create_time' in df.columns:
            df = df.with_columns([
                # Year-month bucket used downstream for partitioning.
                # NOTE(review): assumes 'create_time' is a temporal dtype;
                # `.dt` fails on plain strings — TODO confirm upstream schema.
                pl.col('create_time').dt.strftime('%Y-%m').alias('year_month')
            ])

        logger.debug(f"数据转换完成，处理 {len(df)} 条记录")
        return df

    except Exception as e:
        # Best-effort: log and fall back to the (possibly partially
        # transformed) frame rather than failing the whole batch.
        logger.error(f"数据转换失败: {e}")
        return df


async def example_1_basic_usage():
    """Example 1: basic usage — sync completed work orders from the last week."""
    logger.info("=== 示例1: 基础用法 - 同步工单数据 ===")

    # Source query; %s placeholders are bound via `params`.
    sql = """
    SELECT 
        Id,
        WorkOrderId,
        AppCode,
        OrderTypeName,
        WorkStatusName,
        ProName,
        CityName,
        AreaName,
        InstallAddress,
        CustSettleName,
        CustomerName,
        ArtificialServicePrice,
        CompleteTime,
        CreatePersonName
    FROM vi_workcount_log
    WHERE CompleteTime >= %s 
      AND CompleteTime < %s
      AND WorkStatusName = '已完成'
    ORDER BY CompleteTime DESC
    """

    # Query window: the last 7 days, up to now.
    now = datetime.now()
    window = (
        (now - timedelta(days=7)).strftime('%Y-%m-%d'),
        now.strftime('%Y-%m-%d'),
    )

    # Moderate batch/concurrency settings; REPLACE overwrites conflicting rows.
    result = await custom_batch_sync_task(
        sql=sql,
        target_table='workcount_log_sync',
        batch_size=3000,
        max_concurrent_batches=4,
        max_connections=15,
        conflict_strategy='REPLACE',
        params=window,
    )

    logger.info(f"同步结果: {result}")
    return result


async def example_2_with_transform():
    """Example 2: batch sync with a per-batch transform function applied."""
    logger.info("=== 示例2: 带数据转换的批量处理 ===")

    # Join query aliasing columns to the names custom_data_transform expects.
    sql = """
    SELECT 
        a.Id,
        a.WorkOrderId,
        a.AppCode,
        a.ArtificialServicePrice as amount,
        a.WorkStatusName as status,
        a.CompleteTime as create_time,
        a.ProName,
        a.CityName,
        a.CustomerName,
        b.TypeName as order_type
    FROM vi_workcount_log a
    LEFT JOIN basic_ordertypeinfo b ON a.OrderType = b.TypeCode
    WHERE a.CompleteTime >= %s 
      AND a.CompleteTime < %s
      AND a.ArtificialServicePrice > 0
    ORDER BY a.CompleteTime DESC
    """

    # Query window: the last 30 days, up to now.
    now = datetime.now()
    window = (
        (now - timedelta(days=30)).strftime('%Y-%m-%d'),
        now.strftime('%Y-%m-%d'),
    )

    # Larger batches with UPDATE conflict handling; transform runs per batch.
    result = await custom_batch_sync_task(
        sql=sql,
        target_table='workcount_log_enhanced',
        batch_size=5000,
        max_concurrent_batches=6,
        max_connections=20,
        transform_func=custom_data_transform,
        conflict_strategy='UPDATE',
        params=window,
    )

    logger.info(f"带转换的同步结果: {result}")
    return result


async def example_3_large_dataset():
    """Example 3: large dataset (100k+ rows) with aggressive batching."""
    logger.info("=== 示例3: 大数据集处理（10万+记录） ===")

    sql = """
    SELECT 
        Id,
        WorkOrderId,
        AppCode,
        OrderTypeName,
        WorkStatusName,
        ProName,
        CityName,
        AreaName,
        CustomerName,
        ArtificialServicePrice,
        CompleteTime,
        CreatePersonName,
        ServiceCode,
        ServiceName
    FROM vi_workcount_log
    WHERE CompleteTime >= %s 
      AND CompleteTime < %s
    ORDER BY CompleteTime DESC
    """

    # Query window: the last 6 months — may yield 100k+ rows.
    now = datetime.now()
    window = (
        (now - timedelta(days=180)).strftime('%Y-%m-%d'),
        now.strftime('%Y-%m-%d'),
    )

    # Tuned for throughput: big batches, high concurrency, IGNORE on conflict.
    result = await custom_batch_sync_task(
        sql=sql,
        target_table='workcount_log_archive',
        batch_size=8000,
        max_concurrent_batches=8,
        max_connections=25,
        max_workers=6,
        conflict_strategy='IGNORE',
        params=window,
    )

    logger.info(f"大数据集同步结果: {result}")
    return result


async def example_4_hourly_sync():
    """Example 4: incremental sync covering the most recent hour."""
    logger.info("=== 示例4: 每小时增量同步 ===")

    sql = """
    SELECT 
        Id,
        WorkOrderId,
        AppCode,
        OrderTypeName,
        WorkStatusName,
        ProName,
        CityName,
        CustomerName,
        ArtificialServicePrice,
        CompleteTime,
        CreatePersonName,
        UNIX_TIMESTAMP(CompleteTime) as timestamp_key
    FROM vi_workcount_log
    WHERE CompleteTime >= %s 
      AND CompleteTime < %s
      AND (WorkStatusName = '已完成' OR WorkStatusName = '已确认')
    ORDER BY CompleteTime DESC
    """

    # Query window: the last hour, second-level precision.
    now = datetime.now()
    window = (
        (now - timedelta(hours=1)).strftime('%Y-%m-%d %H:%M:%S'),
        now.strftime('%Y-%m-%d %H:%M:%S'),
    )

    # Small batches / low concurrency — suited to a frequent incremental job.
    result = await custom_batch_sync_task(
        sql=sql,
        target_table='workcount_log_realtime',
        batch_size=1000,
        max_concurrent_batches=3,
        max_connections=10,
        conflict_strategy='UPDATE',
        params=window,
    )

    logger.info(f"增量同步结果: {result}")
    return result


def example_5_sync_wrapper():
    """Example 5: run the batch task from a synchronous context (e.g. a scheduler)."""
    logger.info("=== 示例5: 同步环境使用 ===")

    sql = """
    SELECT 
        Id,
        WorkOrderId,
        AppCode,
        CustomerName,
        ArtificialServicePrice,
        CompleteTime
    FROM vi_workcount_log
    WHERE CompleteTime >= %s 
      AND CompleteTime < %s
    ORDER BY Id DESC
    """

    # Query window: the last 24 hours.
    now = datetime.now()
    window = (
        (now - timedelta(hours=24)).strftime('%Y-%m-%d %H:%M:%S'),
        now.strftime('%Y-%m-%d %H:%M:%S'),
    )

    # Blocking wrapper — no event loop required by the caller.
    result = sync_custom_batch_task(
        sql=sql,
        target_table='workcount_log_daily',
        batch_size=2000,
        max_concurrent_batches=4,
        params=window,
    )

    logger.info(f"同步环境结果: {result}")
    return result


async def example_6_performance_test():
    """
    Example 6: compare throughput across batch-size/concurrency configurations.

    Runs the same bounded query (LIMIT 50000) once per configuration and
    logs per-config wall-clock time and rows/second.

    Returns:
        list[dict]: one result dict per configuration, each extended with
        the 'config' used and the measured 'total_time' in seconds.
    """
    logger.info("=== 示例6: 性能测试对比 ===")

    sql = """
    SELECT 
        Id,
        WorkOrderId,
        AppCode,
        OrderTypeName,
        CustomerName,
        ArtificialServicePrice,
        CompleteTime
    FROM vi_workcount_log
    WHERE CompleteTime >= %s 
      AND CompleteTime < %s
    ORDER BY Id DESC
    LIMIT 50000
    """

    end_date = datetime.now()
    start_date = end_date - timedelta(days=90)
    params = (start_date.strftime('%Y-%m-%d'), end_date.strftime('%Y-%m-%d'))

    # Candidate configurations, conservative to aggressive.
    configs = [
        {
            'name': '小批量低并发',
            'batch_size': 1000,
            'max_concurrent_batches': 2,
            'max_connections': 8
        },
        {
            'name': '中批量中并发',
            'batch_size': 5000,
            'max_concurrent_batches': 5,
            'max_connections': 15
        },
        {
            'name': '大批量高并发',
            'batch_size': 10000,
            'max_concurrent_batches': 8,
            'max_connections': 25
        }
    ]

    results = []
    for config in configs:
        logger.info(f"测试配置: {config['name']}")

        start_time = datetime.now()
        result = await custom_batch_sync_task(
            sql=sql,
            # NOTE(review): config names are non-ASCII, so the derived table
            # names are too — confirm the target DB accepts them.
            target_table=f"workcount_log_test_{config['name'].replace(' ', '_')}",
            batch_size=config['batch_size'],
            max_concurrent_batches=config['max_concurrent_batches'],
            max_connections=config['max_connections'],
            conflict_strategy='REPLACE',
            params=params
        )
        end_time = datetime.now()

        result['config'] = config
        result['total_time'] = (end_time - start_time).total_seconds()
        results.append(result)

        logger.info(f"{config['name']} 完成，耗时: {result['total_time']:.2f}秒")

    # Summary table.
    logger.info("\n=== 性能对比结果 ===")
    for result in results:
        config = result['config']
        elapsed = result['total_time']
        # BUG FIX: guard the rate computation — a near-instant (or empty) run
        # yields elapsed == 0.0 and the old code raised ZeroDivisionError.
        rate = result['success_count'] / elapsed if elapsed > 0 else 0.0
        logger.info(f"{config['name']}: 成功={result['success_count']}, 耗时={elapsed:.2f}s, 速度={rate:.0f}条/秒")

    return results


async def main():
    """Async entry point: run the lightweight examples back to back."""
    setup_logging()

    logger.info("开始自定义批量处理示例")

    try:
        # Short pause between examples keeps their log output separated.
        for example in (
            example_1_basic_usage,
            example_2_with_transform,
            example_4_hourly_sync,
        ):
            await example()
            await asyncio.sleep(2)

        # Heavier runs, disabled by default:
        # await example_3_large_dataset()
        # await example_6_performance_test()

        logger.info("所有示例执行完成")

    except Exception as e:
        logger.exception(f"示例执行失败: {e}")


def sync_main():
    """Entry point for synchronous (non-asyncio) environments."""
    setup_logging()

    logger.info("开始同步环境示例")

    try:
        outcome = example_5_sync_wrapper()
        logger.info(f"同步示例完成: {outcome}")
    except Exception as e:
        logger.exception(f"同步示例执行失败: {e}")


if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='自定义批量处理示例')
    parser.add_argument('--sync', action='store_true', help='使用同步模式')
    parser.add_argument('--example', type=int, choices=[1, 2, 3, 4, 5, 6], help='运行指定示例')
    args = parser.parse_args()

    # Async examples keyed by number; example 5 is the one synchronous entry.
    ASYNC_EXAMPLES = {
        1: example_1_basic_usage,
        2: example_2_with_transform,
        3: example_3_large_dataset,
        4: example_4_hourly_sync,
        6: example_6_performance_test,
    }

    if args.sync:
        sync_main()
    elif args.example:
        setup_logging()
        if args.example == 5:
            example_5_sync_wrapper()
        else:
            asyncio.run(ASYNC_EXAMPLES[args.example]())
    else:
        asyncio.run(main())