import asyncio
import aiomysql
import yaml
import time
import os
from datetime import datetime, timedelta
from decimal import Decimal
from loguru import logger
from typing import List, Dict, Any, Optional, Callable
import polars as pl
from concurrent.futures import ThreadPoolExecutor
import threading


class CustomBatchHandler:
    """
    Custom batch data processor.

    Reads rows from a source MySQL database via an arbitrary SQL query,
    optionally transforms each chunk with Polars, and bulk-inserts the
    result into a target table — with paginated reads, bounded concurrency
    and connection pooling. Intended for workloads on the order of
    100k rows.
    """

    def __init__(self, max_connections: int = 20, max_workers: int = 4):
        """
        Args:
            max_connections: upper bound for each aiomysql pool; also used
                as the instance-level concurrency cap (``_semaphore``).
            max_workers: size of the internal thread pool.
        """
        self.max_connections = max_connections
        self.max_workers = max_workers
        self.source_pool = None
        self.target_pool = None
        self.config = None
        # Caps how many batches may hold a connection concurrently.
        self._semaphore = asyncio.Semaphore(max_connections)
        # NOTE(review): nothing in this class ever submits work to this
        # pool; it is only shut down in __aexit__ — kept for interface
        # stability (callers may rely on its existence).
        self._thread_pool = ThreadPoolExecutor(max_workers=max_workers)
        self._lock = threading.Lock()

    async def __aenter__(self):
        await self.initialize_pools()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.close_pools()
        self._thread_pool.shutdown(wait=True)

    async def initialize_pools(self):
        """
        Load the YAML config and create the source/target connection pools.
        """
        # Config lives at <project root>/config/config.yaml, two levels up
        # from this module file.
        config_path = os.path.join(
            os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
            'config', 'config.yaml'
        )
        with open(config_path, encoding='utf-8') as f:
            self.config = yaml.safe_load(f)

        # Source pool: serves all read/count queries.
        self.source_pool = await aiomysql.create_pool(
            host=self.config['source_db']['host'],
            port=self.config['source_db']['port'],
            user=self.config['source_db']['user'],
            password=self.config['source_db']['password'],
            db=self.config['source_db']['database'],
            charset=self.config['source_db'].get('charset', 'utf8mb4'),
            minsize=10,
            maxsize=self.max_connections,
            autocommit=False
        )

        # Target pool: serves the batched inserts (explicit commit/rollback).
        self.target_pool = await aiomysql.create_pool(
            host=self.config['target_db']['host'],
            port=self.config['target_db']['port'],
            user=self.config['target_db']['user'],
            password=self.config['target_db']['password'],
            db=self.config['target_db']['database'],
            charset=self.config['target_db'].get('charset', 'utf8mb4'),
            minsize=10,
            maxsize=self.max_connections,
            autocommit=False
        )

    async def close_pools(self):
        """
        Close both connection pools and wait for in-flight connections.
        """
        if self.source_pool:
            self.source_pool.close()
            await self.source_pool.wait_closed()

        if self.target_pool:
            self.target_pool.close()
            await self.target_pool.wait_closed()

    async def execute_custom_query(self, sql: str, params: tuple = None) -> List[Dict[str, Any]]:
        """
        Run an arbitrary SELECT against the source database.

        Args:
            sql: SQL query string.
            params: optional parameters for the driver's %s placeholders.

        Returns:
            All rows as a list of column-name -> value dicts ([] when empty).

        Raises:
            Re-raises any driver error after logging it.
        """
        async with self.source_pool.acquire() as conn:
            async with conn.cursor() as cursor:
                try:
                    if params:
                        await cursor.execute(sql, params)
                    else:
                        await cursor.execute(sql)

                    rows = await cursor.fetchall()
                    if rows:
                        # cursor.description carries the column names.
                        columns = [desc[0] for desc in cursor.description]
                        return [dict(zip(columns, row)) for row in rows]
                    return []

                except Exception as e:
                    logger.error(f"[CUSTOM_QUERY] SQL执行失败: {e}")
                    logger.error(f"[CUSTOM_QUERY] SQL: {sql}")
                    raise

    async def get_data_count(self, sql: str, params: tuple = None) -> int:
        """
        Count the rows the given query would return.

        Args:
            sql: the base query (wrapped in a COUNT(*) subquery).
            params: optional query parameters.

        Returns:
            Total row count (0 when the count query yields nothing).
        """
        # Wrap the original query so we don't have to parse it.
        count_sql = f"SELECT COUNT(*) as total FROM ({sql}) as temp_table"

        async with self.source_pool.acquire() as conn:
            async with conn.cursor() as cursor:
                try:
                    if params:
                        await cursor.execute(count_sql, params)
                    else:
                        await cursor.execute(count_sql)

                    result = await cursor.fetchone()
                    return result[0] if result else 0

                except Exception as e:
                    logger.error(f"[COUNT_QUERY] 获取数据总数失败: {e}")
                    raise

    async def fetch_data_batch(self, sql: str, offset: int, limit: int, params: tuple = None) -> List[Dict[str, Any]]:
        """
        Fetch one page of the query result.

        Args:
            sql: base SQL query (should have a deterministic ORDER BY for
                stable pagination — TODO confirm callers guarantee this).
            offset: row offset.
            limit: page size.
            params: optional query parameters.

        Returns:
            The rows of this page.
        """
        # offset/limit are internally computed ints, interpolated directly
        # because LIMIT/OFFSET cannot be bound as parameters by all drivers.
        paginated_sql = f"{sql} LIMIT {limit} OFFSET {offset}"

        return await self.execute_custom_query(paginated_sql, params)

    def process_data_with_polars(self, data: List[Dict[str, Any]],
                                transform_func: Optional[Callable] = None) -> List[Dict[str, Any]]:
        """
        Optionally transform a chunk of rows via a Polars DataFrame.

        Args:
            data: raw rows (list of dicts).
            transform_func: optional callable taking and returning a
                ``pl.DataFrame``.

        Returns:
            Transformed rows; on any Polars failure the ORIGINAL rows are
            returned unchanged (best-effort by design).
        """
        if not data:
            return []

        try:
            df = pl.DataFrame(data)

            if transform_func:
                df = transform_func(df)

            return df.to_dicts()

        except Exception as e:
            # Deliberate fallback: a transform failure must not lose the batch.
            logger.warning(f"[POLARS_PROCESS] Polars处理失败，使用原始数据: {e}")
            return data

    @staticmethod
    def _build_insert_sql(target_table: str, keys: List[str], conflict_strategy: str) -> str:
        """Build the INSERT statement for *keys* under *conflict_strategy*."""
        placeholders = ','.join(['%s'] * len(keys))
        columns = ','.join(f'`{k}`' for k in keys)

        if conflict_strategy == 'IGNORE':
            return f"INSERT IGNORE INTO {target_table} ({columns}) VALUES ({placeholders})"
        if conflict_strategy == 'REPLACE':
            return f"REPLACE INTO {target_table} ({columns}) VALUES ({placeholders})"
        if conflict_strategy == 'UPDATE':
            # BUG FIX: the original joined single-element LISTS
            # (','.join([f"..."] for k in ...)), which raises TypeError the
            # first time 'UPDATE' is used; join plain strings instead.
            # 'id' is excluded, assuming it is the duplicate key itself.
            update_clause = ','.join(f"`{k}`=VALUES(`{k}`)" for k in keys if k != 'id')
            return f"INSERT INTO {target_table} ({columns}) VALUES ({placeholders}) ON DUPLICATE KEY UPDATE {update_clause}"
        # Unknown strategy: plain INSERT (duplicates will raise in MySQL).
        return f"INSERT INTO {target_table} ({columns}) VALUES ({placeholders})"

    @staticmethod
    def _serialize_rows(data: List[Dict[str, Any]], keys: List[str]) -> List[tuple]:
        """Convert row dicts to ordered tuples, stringifying Decimal/datetime."""
        values = []
        for item in data:
            row = []
            for k in keys:
                v = item.get(k)
                if isinstance(v, Decimal):
                    # Keep full precision as a string for the driver.
                    v = str(v)
                elif isinstance(v, datetime):
                    v = v.strftime('%Y-%m-%d %H:%M:%S')
                row.append(v)
            values.append(tuple(row))
        return values

    async def batch_insert_to_target(self, data: List[Dict[str, Any]],
                                   target_table: str,
                                   conflict_strategy: str = 'IGNORE') -> bool:
        """
        Bulk-insert rows into the target table within one transaction.

        Args:
            data: rows to insert; the FIRST row's keys define the column
                list for the whole batch.
            target_table: destination table name.
            conflict_strategy: 'IGNORE', 'REPLACE' or 'UPDATE'
                (ON DUPLICATE KEY UPDATE); anything else is a plain INSERT.

        Returns:
            True on success; False when the insert failed and was rolled back.
        """
        if not data:
            return True

        keys = list(data[0].keys())
        sql = self._build_insert_sql(target_table, keys, conflict_strategy)
        values = self._serialize_rows(data, keys)

        async with self.target_pool.acquire() as conn:
            async with conn.cursor() as cursor:
                try:
                    await cursor.executemany(sql, values)
                    await conn.commit()
                    logger.info(f"[BATCH_INSERT] 成功插入 {len(values)} 条记录到 {target_table}")
                    return True

                except Exception as e:
                    await conn.rollback()
                    logger.error(f"[BATCH_INSERT] 插入失败，已回滚: {e}")
                    return False

    async def process_batch_async(self, batch_data: List[Dict[str, Any]],
                                batch_num: int,
                                target_table: str,
                                transform_func: Optional[Callable] = None,
                                conflict_strategy: str = 'IGNORE') -> Dict[str, Any]:
        """
        Process one batch: transform, then bulk-insert, under the
        instance-level semaphore.

        Args:
            batch_data: rows for this batch.
            batch_num: 1-based batch number (for logging/statistics).
            target_table: destination table name.
            transform_func: optional Polars transform.
            conflict_strategy: see :meth:`batch_insert_to_target`.

        Returns:
            Dict with 'batch_num', 'success_count', 'error_count', 'errors';
            a failed or crashing batch reports ALL its rows as errors.
        """
        async with self._semaphore:
            try:
                logger.info(f"[BATCH_PROCESS] 开始处理批次 {batch_num}, 记录数: {len(batch_data)}")

                processed_data = self.process_data_with_polars(batch_data, transform_func)

                success = await self.batch_insert_to_target(
                    processed_data, target_table, conflict_strategy
                )

                if success:
                    logger.info(f"[BATCH_PROCESS] 批次 {batch_num} 处理完成")
                    return {
                        'batch_num': batch_num,
                        'success_count': len(batch_data),
                        'error_count': 0,
                        'errors': []
                    }
                else:
                    error_msg = f"批次 {batch_num} 插入失败"
                    logger.error(error_msg)
                    return {
                        'batch_num': batch_num,
                        'success_count': 0,
                        'error_count': len(batch_data),
                        'errors': [error_msg]
                    }

            except Exception as e:
                error_msg = f"批次 {batch_num} 处理异常: {str(e)}"
                logger.error(error_msg)
                return {
                    'batch_num': batch_num,
                    'success_count': 0,
                    'error_count': len(batch_data),
                    'errors': [error_msg]
                }

    async def custom_batch_sync(self,
                              sql: str,
                              target_table: str,
                              batch_size: int = 5000,
                              max_concurrent_batches: int = 5,
                              transform_func: Optional[Callable] = None,
                              conflict_strategy: str = 'IGNORE',
                              params: tuple = None) -> Dict[str, Any]:
        """
        Main entry: sync the result of *sql* into *target_table* in
        concurrent, paginated batches.

        Args:
            sql: custom source query.
            target_table: destination table name.
            batch_size: rows per batch.
            max_concurrent_batches: how many batches run at once.
            transform_func: optional Polars transform applied per batch.
            conflict_strategy: see :meth:`batch_insert_to_target`.
            params: optional query parameters.

        Returns:
            Statistics dict: success_count, error_count, error_details,
            duration (seconds), total_count, total_batches.
        """
        start_time = time.time()
        total_success = 0
        total_error = 0
        error_details = []

        logger.info(f"[CUSTOM_SYNC] 开始自定义批量同步任务")
        logger.info(f"[CUSTOM_SYNC] 目标表: {target_table}, 批次大小: {batch_size}")

        try:
            total_count = await self.get_data_count(sql, params)
            logger.info(f"[CUSTOM_SYNC] 数据总数: {total_count}")

            if total_count == 0:
                logger.info(f"[CUSTOM_SYNC] 没有数据需要同步")
                return {
                    'success_count': 0,
                    'error_count': 0,
                    'error_details': [],
                    'duration': time.time() - start_time,
                    'total_count': 0
                }

            # Ceiling division: last batch may be short.
            total_batches = (total_count + batch_size - 1) // batch_size
            logger.info(f"[CUSTOM_SYNC] 总批次数: {total_batches}")

            # Local semaphore bounds concurrent batches (in addition to the
            # instance-level connection semaphore).
            semaphore = asyncio.Semaphore(max_concurrent_batches)

            async def process_single_batch(batch_num: int, offset: int):
                async with semaphore:
                    try:
                        batch_data = await self.fetch_data_batch(
                            sql, offset, batch_size, params
                        )

                        if not batch_data:
                            # Page beyond the data (count may have shrunk).
                            return {
                                'batch_num': batch_num,
                                'success_count': 0,
                                'error_count': 0,
                                'errors': []
                            }

                        return await self.process_batch_async(
                            batch_data, batch_num, target_table,
                            transform_func, conflict_strategy
                        )

                    except Exception as e:
                        error_msg = f"批次 {batch_num} 获取数据失败: {str(e)}"
                        logger.error(error_msg)
                        return {
                            'batch_num': batch_num,
                            'success_count': 0,
                            'error_count': batch_size,
                            'errors': [error_msg]
                        }

            tasks = []
            for i in range(total_batches):
                offset = i * batch_size
                batch_num = i + 1
                tasks.append(process_single_batch(batch_num, offset))

            logger.info(f"[CUSTOM_SYNC] 开始并发处理 {len(tasks)} 个批次")
            results = await asyncio.gather(*tasks, return_exceptions=True)

            # Aggregate per-batch statistics.
            for result in results:
                if isinstance(result, Exception):
                    error_details.append(f"任务执行异常: {str(result)}")
                    total_error += batch_size  # estimated: exact count unknown
                else:
                    total_success += result['success_count']
                    total_error += result['error_count']
                    error_details.extend(result['errors'])

            duration = time.time() - start_time
            logger.info(f"[CUSTOM_SYNC] 同步完成, 成功={total_success}, 失败={total_error}, 耗时={duration:.2f}s")

            return {
                'success_count': total_success,
                'error_count': total_error,
                'error_details': error_details,
                'duration': duration,
                'total_count': total_count,
                'total_batches': total_batches
            }

        except Exception as e:
            logger.exception(f"[CUSTOM_SYNC] 同步任务执行失败: {e}")
            return {
                'success_count': total_success,
                'error_count': total_error,
                'error_details': error_details + [f"任务执行异常: {str(e)}"],
                'duration': time.time() - start_time,
                'total_count': 0,
                'total_batches': 0
            }


# 便捷函数，用于在调度器中调用
async def custom_batch_sync_task(sql: str,
                               target_table: str,
                               batch_size: int = 5000,
                               max_concurrent_batches: int = 5,
                               max_connections: int = 20,
                               max_workers: int = 4,
                               transform_func: Optional[Callable] = None,
                               conflict_strategy: str = 'IGNORE',
                               params: tuple = None,
                               **kwargs) -> Dict[str, Any]:
    """
    Convenience entry point for running a custom batch sync (e.g. from a
    scheduler).

    Creates a :class:`CustomBatchHandler`, runs one sync pass, and tears the
    handler down again.

    Args:
        sql: custom source query.
        target_table: destination table name.
        batch_size: rows per batch (typically 5000-10000).
        max_concurrent_batches: concurrent batches (typically 3-8).
        max_connections: max database connections per pool.
        max_workers: thread-pool size for the handler.
        transform_func: optional per-batch Polars transform.
        conflict_strategy: conflict handling ('IGNORE'/'REPLACE'/'UPDATE').
        params: optional query parameters.

    Returns:
        The sync statistics dict produced by ``custom_batch_sync``.
    """
    handler = CustomBatchHandler(max_connections=max_connections,
                                 max_workers=max_workers)
    async with handler:
        result = await handler.custom_batch_sync(
            sql=sql,
            target_table=target_table,
            batch_size=batch_size,
            max_concurrent_batches=max_concurrent_batches,
            transform_func=transform_func,
            conflict_strategy=conflict_strategy,
            params=params
        )
    return result


# 同步包装器，用于在非异步环境中调用
def sync_custom_batch_task(sql: str,
                         target_table: str,
                         batch_size: int = 5000,
                         max_concurrent_batches: int = 5,
                         max_connections: int = 20,
                         max_workers: int = 4,
                         transform_func: Optional[Callable] = None,
                         conflict_strategy: str = 'IGNORE',
                         params: tuple = None,
                         **kwargs) -> Dict[str, Any]:
    """
    Blocking wrapper around :func:`custom_batch_sync_task` for synchronous
    callers (spins up its own event loop via ``asyncio.run``).
    """
    coro = custom_batch_sync_task(
        sql=sql,
        target_table=target_table,
        batch_size=batch_size,
        max_concurrent_batches=max_concurrent_batches,
        max_connections=max_connections,
        max_workers=max_workers,
        transform_func=transform_func,
        conflict_strategy=conflict_strategy,
        params=params,
        **kwargs
    )
    return asyncio.run(coro)


# 示例数据转换函数
def example_transform_func(df: pl.DataFrame) -> pl.DataFrame:
    """
    Example data-transformation hook for the batch sync.

    Args:
        df: Polars DataFrame holding one batch of rows.

    Returns:
        The DataFrame with a processing timestamp and two demo derived
        columns appended.
    """
    return df.with_columns([
        # Stamp every row with the processing time.
        pl.lit(datetime.now().strftime('%Y-%m-%d %H:%M:%S')).alias('processed_at'),
        # Example string transform. BUG FIX: Polars' string namespace has
        # `str.to_uppercase()`, not `str.upper()` — the original raised
        # AttributeError whenever 'some_column' was present.
        pl.col('some_column').str.to_uppercase().alias('some_column_upper') if 'some_column' in df.columns else pl.lit(None).alias('some_column_upper'),
        # Example numeric derivation (adds 10% tax).
        (pl.col('amount') * 1.1).alias('amount_with_tax') if 'amount' in df.columns else pl.lit(None).alias('amount_with_tax')
    ])