import json
import logging
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional

import pymysql

from ..utils.mysql_db import get_db_cursor, DatabaseLogger
from ..utils.starrocks_db import get_starrocks_cursor, StarRocksLogger

logger = logging.getLogger(__name__)


class StarRocksTableService:
    """StarRocks表管理服务 - 支持多实例"""
    
    def __init__(self):
        # Stateless by design: every call resolves its own StarRocks config
        # from the service_config table, so one service instance can serve
        # any number of StarRocks clusters.
        logger.info("StarRocksTableService initialized with multi-instance support.")
    
    def _get_starrocks_config(self, config_id: int) -> Dict[str, Any]:
        """Load the active StarRocks connection config for ``config_id``.

        Reads the matching row from ``service_config`` (must be of type
        'starrocks', active, and not soft-deleted) and merges any JSON
        ``extra_config`` keys into the returned dict (extra keys override
        the base columns).

        Args:
            config_id: Primary key of the row in ``service_config``.

        Returns:
            Dict with host, port, username, password, database, etc.

        Raises:
            ValueError: If no active config exists for ``config_id``.
        """
        try:
            with get_db_cursor() as cursor:
                cursor.execute("""
                    SELECT id, config_name, host, port, username, password, 
                           `database`, extra_config, description
                    FROM service_config 
                    WHERE id = %s AND service_type = 'starrocks' 
                    AND is_deleted = 0 AND is_active = 1
                """, (config_id,))
                config = cursor.fetchone()
                if not config:
                    raise ValueError(f"StarRocks配置不存在或未激活: config_id={config_id}")

                # Merge optional JSON settings (timeouts, charset, ...);
                # json is now imported at module level instead of inline.
                if config.get('extra_config'):
                    config.update(json.loads(config['extra_config']))

                return config
        except Exception as e:
            logger.error(f"获取StarRocks配置失败: {e}")
            raise
    
    def _create_starrocks_connection(self, config: Dict[str, Any]) -> pymysql.Connection:
        """Open a pymysql connection to StarRocks with a 3-attempt retry.

        Waits 30s after the first failure and 60s after the second; the
        last exception is re-raised if every attempt fails.
        """
        # Optional tuning knobs fall back to conservative defaults when
        # absent from the config dict.
        connection_params = dict(
            host=config['host'],
            port=config['port'],
            user=config['username'],
            password=config['password'],
            database=config.get('database', ''),
            charset=config.get('charset', 'utf8mb4'),
            cursorclass=pymysql.cursors.DictCursor,
            autocommit=config.get('autocommit', False),
            connect_timeout=config.get('connect_timeout', 10),
            read_timeout=config.get('read_timeout', 30),
            write_timeout=config.get('write_timeout', 30),
        )

        max_retries = 3
        last_error = None
        retry = 0

        while retry < max_retries:
            try:
                if retry > 0:
                    logger.info(f"🔄 重试StarRocks连接 (尝试{retry+1}/{max_retries}) [{config.get('config_name', 'Unknown')}]")

                conn = pymysql.connect(**connection_params)
                # Success log kept at DEBUG so bulk operations stay quiet.
                logger.debug(f"StarRocks连接创建成功: {config['config_name']} ({config['host']}:{config['port']})")
                return conn

            except Exception as e:
                last_error = e
                logger.error(f"创建StarRocks连接失败 (尝试{retry+1}/{max_retries}) [{config.get('config_name', 'Unknown')}]: {e}")

                if retry < max_retries - 1:
                    # Escalating backoff: 30s after the 1st failure, 60s after the 2nd.
                    wait_time = 30 if retry == 0 else 60
                    logger.info(f"⏰ 等待{wait_time}秒后重试...")
                    time.sleep(wait_time)
                else:
                    logger.error(f"❌ StarRocks连接重试{max_retries}次均失败")
            retry += 1

        # All attempts exhausted: surface the most recent failure.
        raise last_error

    def get_tables(self, config_id: int, page: int = 1, page_size: int = 20, **filters) -> Dict[str, Any]:
        """Page through the tables of one StarRocks config with optional filters.

        Args:
            config_id: StarRocks service_config id (always applied).
            page: 1-based page number.
            page_size: Rows per page.
            **filters: Optional keys:
                database: exact database name.
                search: LIKE match on table name/comment.
                monitor_enabled: 'true'/'false' string.
                storage_medium: exact value.
                is_partitioned: 'true'/'false' string.
                sort: whitelisted field name (default 'updated_at').
                order: 'ASC' or 'DESC' (default 'DESC').

        Returns:
            {'success': True, 'data': {'tables': [...], 'pagination': {...}}}
        """
        try:
            with get_db_cursor() as cursor:
                where_conditions = []
                params = []

                # config_id is bound directly in both queries below, not here.
                # System schemas are always excluded from listings.
                where_conditions.append("t.database_name NOT IN ('information_schema', '_statistics_', 'starrocks_audit_db__', 'sys', 'mysql', 'performance_schema')")

                if filters.get('database'):
                    where_conditions.append("t.database_name = %s")
                    params.append(filters['database'])

                if filters.get('search'):
                    where_conditions.append("(t.table_name LIKE %s OR t.table_comment LIKE %s)")
                    search_term = f"%{filters['search']}%"
                    params.extend([search_term, search_term])

                if filters.get('monitor_enabled'):
                    where_conditions.append("t.monitor_enabled = %s")
                    params.append(1 if filters['monitor_enabled'] == 'true' else 0)

                if filters.get('storage_medium'):
                    where_conditions.append("t.storage_medium = %s")
                    params.append(filters['storage_medium'])

                if filters.get('is_partitioned'):
                    where_conditions.append("t.is_partitioned = %s")
                    params.append(1 if filters['is_partitioned'] == 'true' else 0)

                extra_where = " AND ".join(where_conditions) if where_conditions else "1=1"

                # Total count for pagination (main table only).
                cursor.execute(f"""
                    SELECT COUNT(*) as total 
                    FROM starrocks_tables t
                    WHERE t.config_id = %s AND {extra_where}
                """, [config_id] + params)
                total = cursor.fetchone()['total']

                # Sorting: the whitelist maps public field names to real
                # columns, preventing SQL injection via the sort parameter.
                # `or 'DESC'` also covers an explicit order=None value.
                sort_field = filters.get('sort', 'updated_at')
                sort_order = str(filters.get('order') or 'DESC').upper()

                allowed_sort_fields = {
                    'table_name': 'table_name',
                    'database_name': 'database_name',
                    'total_rows': 'latest_total_rows',
                    'data_length': 'latest_data_length',
                    'updated_at': 'updated_at',
                    'created_at': 'created_at',
                    'previous_increment_rows': 'previous_increment_rows',
                    'current_increment_rows': 'current_increment_rows',
                    'total_increment_rows': 'total_increment_rows',
                    'last_stat_time': 'last_stat_time'
                }

                sort_column = allowed_sort_fields.get(sort_field, 'updated_at')
                if sort_order not in ('ASC', 'DESC'):
                    sort_order = 'DESC'

                # Page of rows straight from the main table (pure-increment design).
                offset = (page - 1) * page_size
                cursor.execute(f"""
                    SELECT 
                        t.id,
                        t.config_id,
                        sc.config_name AS starrocks_config_name,
                        sc.host AS starrocks_host,
                        sc.port AS starrocks_port,
                        t.database_name,
                        t.table_name,
                        t.table_comment,
                        t.engine,
                        t.table_type,
                        t.is_partitioned,
                        t.partition_field,
                        t.key_type,
                        t.storage_medium,
                        t.total_columns,
                        t.monitor_enabled,
                        t.last_sync_time,
                        -- 最新统计数据
                        t.latest_total_rows AS total_rows,
                        t.latest_data_length AS data_length,
                        t.latest_index_length AS index_length,
                        t.last_stat_time,
                        -- 三增量设计（上次、当前、总计）
                        t.previous_increment_rows,
                        t.current_increment_rows,
                        t.total_increment_rows,
                        t.previous_increment_data,
                        t.current_increment_data,
                        t.total_increment_data,
                        -- 当天基准
                        t.today_start_total_rows,
                        t.today_start_date,
                        -- 统计类型
                        t.last_stat_type,
                        t.created_at,
                        t.updated_at
                    FROM starrocks_tables t
                    LEFT JOIN service_config sc ON t.config_id = sc.id AND sc.service_type = 'starrocks'
                    WHERE t.config_id = %s AND {extra_where}
                    ORDER BY t.{sort_column} {sort_order}
                    LIMIT %s OFFSET %s
                """, [config_id] + params + [page_size, offset])

                tables = cursor.fetchall()

                # Serialize datetimes to plain strings so Flask does not
                # render them as RFC-1123 GMT timestamps (one loop replaces
                # four duplicated if-branches per row).
                datetime_fields = ('last_stat_time', 'created_at', 'updated_at', 'last_sync_time')
                for table in tables:
                    for field in datetime_fields:
                        if table.get(field):
                            table[field] = table[field].strftime('%Y-%m-%d %H:%M:%S')

                return {
                    'success': True,
                    'data': {
                        'tables': tables,
                        'pagination': {
                            'page': page,
                            'pageSize': page_size,
                            'total': total,
                            'pages': (total + page_size - 1) // page_size
                        }
                    }
                }

        except Exception as e:
            logger.error(f"❌ 获取表列表失败: {e}")
            raise

    def get_table_detail(self, table_id: int) -> Dict[str, Any]:
        """Fetch one table's full main-table row plus its column definitions.

        Args:
            table_id: Primary key in ``starrocks_tables``.

        Returns:
            {'success': True, 'data': {...}} on success, or an
            (error dict, 404) tuple when the table does not exist
            (Flask-style response pair — callers rely on this shape).
        """
        try:
            with get_db_cursor() as cursor:
                # Main-table row already carries the increment stats
                # (pure-increment design), joined with its config info.
                cursor.execute("""
                    SELECT 
                        t.*,
                        sc.config_name AS starrocks_config_name,
                        sc.host AS starrocks_host,
                        sc.port AS starrocks_port
                    FROM starrocks_tables t
                    LEFT JOIN service_config sc ON t.config_id = sc.id AND sc.service_type = 'starrocks'
                    WHERE t.id = %s
                """, (table_id,))
                table_info = cursor.fetchone()

                if not table_info:
                    return {"success": False, "error": "表不存在"}, 404

                # Column metadata, in declaration order.
                cursor.execute("""
                    SELECT * FROM starrocks_table_columns 
                    WHERE table_id = %s 
                    ORDER BY ordinal_position
                """, (table_id,))
                columns = cursor.fetchall()

                # Serialize datetimes to plain strings (avoids Flask's GMT
                # rendering); one loop replaces four duplicated branches.
                for field in ('created_at', 'updated_at', 'last_sync_time', 'last_stat_time'):
                    if table_info.get(field):
                        table_info[field] = table_info[field].strftime('%Y-%m-%d %H:%M:%S')

                return {
                    'success': True,
                    'data': {
                        'table_info': table_info,
                        'columns': columns,
                        'latest_stats': None  # deprecated: stats now live on table_info
                    }
                }
        except Exception as e:
            logger.error(f"❌ 获取表详情失败: {e}")
            raise

    def toggle_monitor_status(self, table_id: int, monitor_enabled: bool) -> Dict[str, Any]:
        """Flip the monitoring flag for a single table.

        Returns a success payload, or an (error dict, 404) tuple when no
        row matches ``table_id``.
        """
        try:
            with get_db_cursor(autocommit=True) as cursor:
                cursor.execute("""
                    UPDATE starrocks_tables SET monitor_enabled = %s, updated_at = NOW()
                    WHERE id = %s
                """, (monitor_enabled, table_id))
                if not cursor.rowcount:
                    return {"error": "表不存在"}, 404
                status_text = '启用' if monitor_enabled else '禁用'
                return {"success": True, "message": f"监控状态已{status_text}"}
        except Exception as e:
            logger.error(f"❌ 切换表监控状态失败: {e}")
            raise

    def count_table_data(self, config_id: int, table_id: int) -> Dict[str, Any]:
        """Manually recount one table via SELECT COUNT(*) (pure-increment design).

        Runs COUNT(*) on StarRocks with up to 3 attempts and an
        information_schema row-estimate fallback, recomputes the three
        increments (previous / current / day-total), then persists the new
        snapshot to the main table and appends a history row.

        Args:
            config_id: StarRocks service_config id; must match the table's config.
            table_id: Primary key in ``starrocks_tables``.

        Returns:
            Success payload with counts and increments, or an
            (error dict, status) tuple for unknown/mismatched tables.
        """
        start_time = time.time()
        sr_conn = None
        try:
            # 1. Load table identity plus the previous statistics snapshot.
            with get_db_cursor() as cursor:
                cursor.execute("""
                    SELECT database_name, table_name, config_id,
                           latest_total_rows, latest_data_length, latest_index_length,
                           current_increment_rows, total_increment_rows,
                           today_start_total_rows, today_start_data_length, today_start_date
                    FROM starrocks_tables 
                    WHERE id = %s
                """, (table_id,))
                table_info = cursor.fetchone()

                if not table_info:
                    return {"error": "表不存在"}, 404

                if table_info['config_id'] != config_id:
                    return {"error": "config_id不匹配"}, 400

            database = table_info['database_name']
            table_name = table_info['table_name']
            prev_total_rows = table_info['latest_total_rows'] or 0
            prev_data_length = table_info['latest_data_length'] or 0
            prev_index_length = table_info['latest_index_length'] or 0
            prev_current_increment = table_info['current_increment_rows'] or 0
            today_start_total = table_info['today_start_total_rows'] or 0
            today_start_data = table_info['today_start_data_length'] or 0
            today_start_date = table_info['today_start_date']

            # 2. COUNT(*) on StarRocks with retries; allow long scans.
            config = self._get_starrocks_config(config_id)
            config['read_timeout'] = 300  # 5-minute timeout for big tables

            current_total_rows = 0
            max_retries = 3
            retry_count = 0

            while retry_count < max_retries:
                sr_conn = None
                try:
                    sr_conn = self._create_starrocks_connection(config)
                    starrocks_cursor = sr_conn.cursor()

                    if retry_count > 0:
                        logger.info(f"🔄 重试COUNT查询 ({retry_count}/{max_retries}): {database}.{table_name}")
                    else:
                        logger.info(f"⏱️  开始COUNT查询: {database}.{table_name}")

                    # Identifiers cannot be bound as parameters; both names
                    # come from our own metadata table and are backtick-quoted.
                    starrocks_cursor.execute(f"SELECT COUNT(*) as total_rows FROM `{database}`.`{table_name}`")
                    result = starrocks_cursor.fetchone()
                    current_total_rows = result['total_rows'] if result else 0
                    starrocks_cursor.close()

                    logger.info(f"✅ COUNT成功: {database}.{table_name} - {current_total_rows}行")
                    break  # success: leave the retry loop

                except Exception as count_error:
                    retry_count += 1
                    logger.warning(f"⚠️ COUNT失败 (尝试{retry_count}/{max_retries}): {database}.{table_name} - {count_error}")

                    if retry_count >= max_retries:
                        # All attempts failed: degrade to the row estimate
                        # kept by information_schema.
                        logger.error(f"❌ COUNT查询重试{max_retries}次均失败，使用information_schema估算")
                        try:
                            if sr_conn:
                                cursor = sr_conn.cursor()
                                # Fixed: schema/table are now bound parameters
                                # instead of being interpolated into the SQL.
                                cursor.execute("""
                                    SELECT COALESCE(TABLE_ROWS, 0) as total_rows
                                    FROM information_schema.TABLES
                                    WHERE TABLE_SCHEMA = %s AND TABLE_NAME = %s
                                """, (database, table_name))
                                result = cursor.fetchone()
                                current_total_rows = result['total_rows'] if result else 0
                                cursor.close()
                                logger.warning(f"⚠️ 使用information_schema估算值: {current_total_rows}")
                        except Exception:  # was a bare except; keep KeyboardInterrupt/SystemExit uncaught
                            current_total_rows = prev_total_rows  # fall back to the last known value
                            logger.error(f"❌ 降级方案也失败，使用上次值: {current_total_rows}")
                    else:
                        # Escalating backoff: 30s after the 1st failure, 60s after the 2nd.
                        wait_time = 30 if retry_count == 1 else 60
                        logger.info(f"⏰ 等待{wait_time}秒后重试...")
                        time.sleep(wait_time)
                finally:
                    if sr_conn:
                        sr_conn.close()
                        sr_conn = None  # important: prevents the outer finally from double-closing

            # 3. Day rollover: reset today's baseline so the cumulative
            #    day increment restarts from yesterday's closing value.
            current_date = datetime.now().date()
            if today_start_date is None or today_start_date < current_date:
                today_start_total = prev_total_rows
                today_start_data = prev_data_length
                today_start_date = current_date
                logger.info(f"📅 表 {table_name} 跨天，重置当天基准: {prev_total_rows}行")

            # 4. Three increments: last delta, this delta, day-total delta.
            previous_increment_rows = prev_current_increment
            current_increment_rows = current_total_rows - prev_total_rows
            total_increment_rows = current_total_rows - today_start_total

            # COUNT(*) yields no size info, so data-size increments stay 0.
            previous_increment_data = 0
            current_increment_data = 0
            total_increment_data = 0

            collection_duration_ms = int((time.time() - start_time) * 1000)

            # 5. Persist the new snapshot (data/index lengths keep their old
            #    values — a manual COUNT does not refresh byte sizes).
            with get_db_cursor(autocommit=True) as cursor:
                cursor.execute("""
                    UPDATE starrocks_tables SET
                        latest_total_rows = %s,
                        latest_data_length = %s,
                        latest_index_length = %s,
                        previous_increment_rows = %s,
                        current_increment_rows = %s,
                        total_increment_rows = %s,
                        previous_increment_data = %s,
                        current_increment_data = %s,
                        total_increment_data = %s,
                        today_start_total_rows = %s,
                        today_start_data_length = %s,
                        today_start_date = %s,
                        last_stat_time = NOW(),
                        last_stat_type = 'manual',
                        updated_at = NOW()
                    WHERE id = %s
                """, (
                    current_total_rows,
                    prev_data_length,
                    prev_index_length,
                    previous_increment_rows,
                    current_increment_rows,
                    total_increment_rows,
                    previous_increment_data,
                    current_increment_data,
                    total_increment_data,
                    today_start_total,
                    today_start_data,
                    today_start_date,
                    table_id
                ))

                # 6. Append an audit row to the per-run history table.
                cursor.execute("""
                    INSERT INTO starrocks_table_stats (
                        config_id, table_id, database_name, table_name, 
                        stat_time, stat_type, 
                        total_rows, data_length, index_length, 
                        previous_increment_rows, current_increment_rows, total_increment_rows,
                        previous_increment_data, current_increment_data, total_increment_data,
                        collection_method, collection_duration_ms
                    ) VALUES (%s, %s, %s, %s, NOW(), %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
                """, (
                    config_id, table_id, database, table_name,
                    'manual',
                    current_total_rows, prev_data_length, prev_index_length,
                    previous_increment_rows, current_increment_rows, total_increment_rows,
                    previous_increment_data, current_increment_data, total_increment_data,
                    'count_query', collection_duration_ms
                ))

            logger.info(f"✅ COUNT统计完成: {database}.{table_name} - {current_total_rows}行 (当前增量{current_increment_rows:+d}, 总增量{total_increment_rows:+d})")

            # 7. Response payload (three-increment design).
            return {
                'success': True,
                'table_id': table_id,
                'database': database,
                'table_name': table_name,
                'total_rows': current_total_rows,
                'data_length': prev_data_length,
                'previous_increment': previous_increment_rows,
                'current_increment': current_increment_rows,
                'total_increment': total_increment_rows,
                'collection_duration_ms': collection_duration_ms,
                'stat_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            }
        except Exception as e:
            logger.error(f"❌ COUNT统计失败: {e}")
            raise
        finally:
            # Normally already closed (and set to None) inside the retry
            # loop; this guards against failures before/around that loop.
            if sr_conn:
                sr_conn.close()

    def get_overview_stats(self, config_id: int) -> Dict[str, Any]:
        """Aggregate overview counters for one config, straight from the
        main table (pure-increment design); system schemas are excluded."""
        try:
            with get_db_cursor() as cursor:
                cursor.execute("""
                    SELECT 
                        COUNT(DISTINCT t.id) as total_tables,
                        COUNT(DISTINCT t.database_name) as total_databases,
                        SUM(CASE WHEN t.monitor_enabled = 1 THEN 1 ELSE 0 END) as monitored_tables,
                        COALESCE(SUM(t.latest_total_rows), 0) as total_data_rows
                    FROM starrocks_tables t
                    WHERE t.config_id = %s
                    AND t.database_name NOT IN ('information_schema', '_statistics_', 'starrocks_audit_db__', 'sys', 'mysql', 'performance_schema')
                """, (config_id,))
                stats = cursor.fetchone()
                wanted = ('total_tables', 'total_databases', 'monitored_tables', 'total_data_rows')
                return {
                    'success': True,
                    'data': {key: stats[key] for key in wanted}
                }
        except Exception as e:
            logger.error(f"❌ 获取总体统计失败: {e}")
            raise

    def get_databases(self, config_id: int) -> List[Dict[str, Any]]:
        """List each database of a config with its table count
        (system schemas are filtered out)."""
        query = """
                    SELECT database_name, COUNT(*) as table_count
                    FROM starrocks_tables
                    WHERE config_id = %s
                    AND database_name NOT IN ('information_schema', '_statistics_', 'starrocks_audit_db__', 'sys', 'mysql', 'performance_schema')
                    GROUP BY database_name
                    ORDER BY database_name
                """
        try:
            with get_db_cursor() as cursor:
                cursor.execute(query, (config_id,))
                rows = cursor.fetchall()
            return rows
        except Exception as e:
            logger.error(f"❌ 获取数据库列表失败: {e}")
            raise

    def get_monitor_config(self) -> List[Dict[str, Any]]:
        """Return every monitor-config row as a list of dicts."""
        query = "SELECT config_key, config_value, config_type, description FROM starrocks_monitor_config"
        try:
            with get_db_cursor() as cursor:
                cursor.execute(query)
                # Callers expect the raw array, not a keyed mapping.
                return cursor.fetchall()
        except Exception as e:
            logger.error(f"❌ 获取监控配置失败: {e}")
            raise

    def update_monitor_config(self, config_updates: Dict[str, Any]) -> Dict[str, Any]:
        """Apply key/value updates to ``starrocks_monitor_config``.

        Values are stringified before writing; keys with no matching row
        are silently skipped and excluded from the reported count.
        """
        try:
            with get_db_cursor(autocommit=True) as cursor:
                updated_count = 0
                for key, value in config_updates.items():
                    cursor.execute("""
                        UPDATE starrocks_monitor_config 
                        SET config_value = %s, updated_at = NOW()
                        WHERE config_key = %s
                    """, (str(value), key))
                    # rowcount > 0 means the key actually existed.
                    updated_count += int(cursor.rowcount > 0)
                return {
                    'success': True,
                    'updated_count': updated_count,
                    'message': f'成功更新 {updated_count} 个配置项'
                }
        except Exception as e:
            logger.error(f"❌ 更新监控配置失败: {e}")
            raise

    def one_click_update(self, config_id: int) -> Dict[str, Any]:
        """One-click refresh: discover new tables, collect row statistics
        for every table, then sync all table structures."""
        t0 = time.time()
        try:
            logger.info(f"🚀 开始一键更新操作 [config_id={config_id}]")

            # Step 1: pick up any tables created since the last sync.
            logger.info("📋 步骤1: 发现最新表")
            new_tables = self._discover_new_tables(config_id).get('new_tables', 0)
            logger.info(f"✅ 发现 {new_tables} 个新表")

            # Step 2: fast row-count pass via information_schema across
            # every database and every table of this config.
            logger.info("📊 步骤2: 统计所有表数据量")
            stats = self.execute_data_statistics(
                config_id=config_id,
                database='',
                collection_method='information_schema',
                selected_table_ids=[]
            )
            statistics_tables = stats.get('processed_tables', 0)
            logger.info(f"✅ 统计完成 {statistics_tables} 个表")

            # Step 3: refresh schema metadata for every table.
            logger.info("🔧 步骤3: 更新所有表结构")
            schema = self.execute_schema_update(
                config_id=config_id,
                database='',
                selected_table_ids=[]
            )
            structure_updated_tables = schema.get('processed_tables', 0)
            logger.info(f"✅ 结构更新完成 {structure_updated_tables} 个表")

            elapsed = round(time.time() - t0, 2)
            summary = f"一键更新完成: 新增{new_tables}表, 统计{statistics_tables}表, 更新{structure_updated_tables}表结构, 耗时{elapsed}秒"
            logger.info(f"🎉 一键更新操作完成: {summary}")
            return {
                'success': True,
                'new_tables': new_tables,
                'statistics_tables': statistics_tables,
                'structure_updated_tables': structure_updated_tables,
                'elapsed_seconds': elapsed,
                'summary': summary
            }
        except Exception as e:
            elapsed = round(time.time() - t0, 2)
            logger.error(f"❌ 一键更新操作失败: {e}, 耗时{elapsed}秒")
            raise

    def fetch_latest_tables(self, config_id: int) -> Dict[str, Any]:
        """Discover brand-new tables and fully initialize them
        (row statistics plus column/structure sync)."""
        t0 = time.time()
        processed = 0
        try:
            logger.info(f"🚀 开始获取最新表操作 [config_id={config_id}]")

            new_tables = self._discover_new_tables(config_id).get('new_tables', 0)

            if new_tables > 0:
                logger.info(f"🆕 发现 {new_tables} 个新表，开始完整初始化...")

                # Rows inserted in the last 5 minutes are treated as the
                # "new" batch (heuristic window around the discovery step).
                cutoff = datetime.now() - timedelta(minutes=5)
                with get_db_cursor() as cursor:
                    cursor.execute("""
                        SELECT id, database_name, table_name, monitor_enabled
                        FROM starrocks_tables 
                        WHERE config_id = %s AND created_at >= %s
                    """, (config_id, cutoff))
                    fresh_rows = cursor.fetchall()

                if fresh_rows:
                    # Initialization pass 1: row counts.
                    logger.info(f"📊 步骤1: 统计 {len(fresh_rows)} 个新表的数据量...")
                    stats = self._batch_statistics_from_schema(config_id, fresh_rows, ['manual'])
                    processed = stats.get('success_count', 0)
                    logger.info(f"✅ 数据统计完成：成功 {processed} 个")

                    # Initialization pass 2: structure + column metadata
                    # (best effort — failures are logged, not raised).
                    logger.info(f"🔧 步骤2: 同步 {len(fresh_rows)} 个新表的字段信息...")
                    try:
                        structures = self._batch_analyze_table_structure(config_id, fresh_rows)
                        self._batch_update_table_info(config_id, structures)
                        self._batch_sync_table_columns(config_id, fresh_rows)
                        logger.info(f"✅ 字段同步完成")
                    except Exception as sync_error:
                        logger.warning(f"⚠️ 字段同步部分失败: {sync_error}")

            elapsed = round(time.time() - t0, 2)
            summary = f"获取最新表完成: 新增{new_tables}表, 统计{processed}表, 耗时{elapsed}秒"
            logger.info(f"🎉 获取最新表操作完成（含字段同步）: {summary}")
            return {
                'success': True,
                'new_tables': new_tables,
                'processed_tables': processed,
                'elapsed_seconds': elapsed,
                'summary': summary
            }
        except Exception as e:
            elapsed = round(time.time() - t0, 2)
            logger.error(f"❌ 获取最新表操作失败: {e}, 耗时{elapsed}秒")
            raise

    def execute_data_statistics(self, config_id: int, database: str, collection_method: str, selected_table_ids: List[int], stat_type: str = 'manual') -> Dict[str, Any]:
        """执行数据统计 - 纯增量设计
        
        Args:
            config_id: StarRocks配置ID
            database: 数据库名称
            collection_method: 收集方法 (information_schema 或 count_query)
            selected_table_ids: 要统计的表ID列表
            stat_type: 统计类型 ('manual'=手动, 'hourly'=小时定时)
        """
        start_time = time.time()
        processed_tables = 0
        success_tables = 0
        failed_tables = 0
        
        try:
            logger.info(f"🚀 开始批量数据统计 [config_id={config_id}] - 方法: {collection_method}, 类型: {stat_type}")
            
            # 获取要处理的表列表（添加config_id过滤）
            with get_db_cursor() as cursor:
                if selected_table_ids:
                    placeholders = ','.join(['%s'] * len(selected_table_ids))
                    cursor.execute(f"SELECT id, database_name, table_name, monitor_enabled FROM starrocks_tables WHERE config_id = %s AND id IN ({placeholders})", [config_id] + selected_table_ids)
                elif database:
                    cursor.execute("SELECT id, database_name, table_name, monitor_enabled FROM starrocks_tables WHERE config_id = %s AND database_name = %s", (config_id, database))
                else:
                    cursor.execute("SELECT id, database_name, table_name, monitor_enabled FROM starrocks_tables WHERE config_id = %s", (config_id,))
                tables_to_process = cursor.fetchall()

            # 过滤只处理启用监控的表
            monitored_tables = [t for t in tables_to_process if t['monitor_enabled']]
            logger.info(f"📊 共 {len(tables_to_process)} 个表，其中 {len(monitored_tables)} 个启用监控")

            if not monitored_tables:
                return {
                    'success': True,
                    'processed_tables': 0,
                    'success_tables': 0,
                    'failed_tables': 0,
                    'elapsed_seconds': round(time.time() - start_time, 2),
                    'message': "没有启用监控的表需要统计"
                }

            # 纯增量设计：只区分两种统计类型
            # - manual: 手动操作（一键更新、单表COUNT、批量统计）
            # - hourly: 每小时定时任务（24次累积=日增量）
            stat_types_to_write = [stat_type]  # 只写入对应的类型
            logger.info(f"📝 将写入统计类型: {stat_types_to_write} (stat_type={stat_type})")

            if collection_method == 'information_schema':
                # 批量从information_schema获取统计信息
                result = self._batch_statistics_from_schema(config_id, monitored_tables, stat_types_to_write)
                success_tables = result['success_count']
                failed_tables = result['failed_count']
                processed_tables = success_tables + failed_tables
                
            elif collection_method == 'count_query':
                # 批量COUNT查询（仍需逐个处理，但使用连接池优化）
                result = self._batch_count_statistics(config_id, monitored_tables, stat_types_to_write)
                success_tables = result['success_count']
                failed_tables = result['failed_count']
                processed_tables = success_tables + failed_tables

            elapsed_seconds = round(time.time() - start_time, 2)
            logger.info(f"✅ 批量统计完成: 处理{processed_tables}表，成功{success_tables}，失败{failed_tables}，耗时{elapsed_seconds}秒")
            
            return {
                'success': True,
                'processed_tables': processed_tables,
                'success_tables': success_tables,
                'failed_tables': failed_tables,
                'elapsed_seconds': elapsed_seconds,
                'message': f"批量统计完成: 处理{processed_tables}表，成功{success_tables}个，失败{failed_tables}个"
            }
        except Exception as e:
            logger.error(f"❌ 执行数据统计失败: {e}")
            raise

    def _batch_statistics_from_schema(self, config_id: int, tables_list: List[Dict[str, Any]], stat_types: List[str]) -> Dict[str, Any]:
        """从information_schema批量获取统计信息 - 三增量设计（当天累计）

        Reads row/size metadata for all tables in a single batched query
        against information_schema.TABLES (with retries), computes the three
        increments (previous / current / daily-cumulative), then bulk-updates
        the main table and bulk-inserts history rows.

        Returns:
            {'success_count': int, 'failed_count': int}
        """
        success_count = 0
        failed_count = 0
        sr_conn = None
        
        try:
            logger.info(f"📊 使用information_schema批量统计 {len(tables_list)} 个表 [config_id={config_id}]，类型: {stat_types}")
            
            # 1. Load the previous stats from the main table (three increments
            #    plus today's baseline, needed for cross-day detection).
            with get_db_cursor() as cursor:
                table_ids = [table_info['id'] for table_info in tables_list]
                if table_ids:
                    placeholders = ','.join(['%s'] * len(table_ids))
                    cursor.execute(f"""
                        SELECT id, database_name, table_name,
                               latest_total_rows, latest_data_length, latest_index_length,
                               current_increment_rows, total_increment_rows,
                               today_start_total_rows, today_start_data_length, today_start_date
                        FROM starrocks_tables
                        WHERE id IN ({placeholders})
                    """, table_ids)
                    previous_stats = {row['id']: row for row in cursor.fetchall()}
                else:
                    previous_stats = {}
            
            # 2. Batched query against StarRocks (with retry mechanism).
            config = self._get_starrocks_config(config_id)
            
            # Build a parameterized OR-condition instead of interpolating
            # schema/table names into the SQL text: fixes a SQL-injection /
            # quoting hazard when names contain quotes.
            table_conditions = []
            condition_params: List[Any] = []
            for table_info in tables_list:
                table_conditions.append("(TABLE_SCHEMA = %s AND TABLE_NAME = %s)")
                condition_params.extend([table_info['database_name'], table_info['table_name']])
            
            batch_condition = " OR ".join(table_conditions)
            
            # Retry logic (kept consistent with the COUNT mode).
            max_retries = 3
            schema_results = None
            query_success = False
            
            for retry in range(max_retries):
                sr_conn = None
                try:
                    if retry > 0:
                        logger.info(f"🔄 重试查询information_schema (尝试{retry+1}/{max_retries})")
                    
                    sr_conn = self._create_starrocks_connection(config)
                    cursor = sr_conn.cursor()
                    
                    cursor.execute(f"""
                        SELECT 
                            TABLE_SCHEMA as database_name,
                            TABLE_NAME as table_name,
                            COALESCE(TABLE_ROWS, 0) as total_rows,
                            COALESCE(DATA_LENGTH, 0) as data_length,
                            COALESCE(INDEX_LENGTH, 0) as index_length
                        FROM information_schema.TABLES
                        WHERE {batch_condition}
                    """, condition_params)
                    schema_results = cursor.fetchall()
                    cursor.close()
                    
                    query_success = True
                    logger.info(f"✅ information_schema查询成功，获取到 {len(schema_results)} 条记录")
                    break  # success: leave the retry loop
                    
                except Exception as query_error:
                    logger.warning(f"⚠️ information_schema查询失败 (尝试{retry+1}/{max_retries}): {query_error}")
                    
                    if retry < max_retries - 1:
                        # Back off: 30s after the first failure, 60s afterwards.
                        wait_time = 30 if retry == 0 else 60
                        logger.info(f"⏰ 等待{wait_time}秒后重试...")
                        time.sleep(wait_time)
                    else:
                        # All attempts exhausted.
                        logger.error(f"❌ information_schema查询重试{max_retries}次均失败")
                        
                finally:
                    if sr_conn:
                        sr_conn.close()
                        sr_conn = None
            
            # Query never succeeded: report every table as failed.
            if not query_success or not schema_results:
                logger.error(f"❌ 无法从information_schema获取数据，统计失败")
                return {'success_count': 0, 'failed_count': len(tables_list)}
            
            # 3. Index the query results by "database.table".
            schema_dict = {}
            for row in schema_results:
                key = f"{row['database_name']}.{row['table_name']}"
                schema_dict[key] = row
            
            # 4. Prepare the bulk main-table updates and history inserts.
            update_main_data = []
            insert_history_data = []
            
            # Current date, used for cross-day (midnight rollover) detection.
            current_date = datetime.now().date()
            
            for table_info in tables_list:
                table_key = f"{table_info['database_name']}.{table_info['table_name']}"
                prev_stat = previous_stats.get(table_info['id'], {
                    'latest_total_rows': 0, 
                    'latest_data_length': 0,
                    'latest_index_length': 0,
                    'current_increment_rows': 0,
                    'total_increment_rows': 0,
                    'today_start_total_rows': 0,
                    'today_start_data_length': 0,
                    'today_start_date': None
                })
                
                if table_key in schema_dict:
                    stats = schema_dict[table_key]
                    
                    # Previous observation (NULL-safe via `or 0`).
                    prev_total = prev_stat['latest_total_rows'] or 0
                    prev_data = prev_stat['latest_data_length'] or 0
                    prev_current_increment = prev_stat['current_increment_rows'] or 0
                    today_start_total = prev_stat['today_start_total_rows'] or 0
                    today_start_data = prev_stat['today_start_data_length'] or 0
                    today_start_date = prev_stat['today_start_date']
                    
                    # Day rollover: reset today's baseline to the last known values.
                    if today_start_date is None or today_start_date < current_date:
                        today_start_total = prev_total
                        today_start_data = prev_data
                        today_start_date = current_date
                    
                    # The three increments.
                    previous_increment_rows = prev_current_increment  # last run's increment
                    current_increment_rows = stats['total_rows'] - prev_total  # delta since last run
                    total_increment_rows = stats['total_rows'] - today_start_total  # delta since midnight
                    
                    previous_increment_data = 0  # no previous data-increment is persisted
                    current_increment_data = stats['data_length'] - prev_data
                    total_increment_data = stats['data_length'] - today_start_data
                    
                    # Main-table update row (three increments + today's baseline).
                    update_main_data.append((
                        stats['total_rows'],
                        stats['data_length'],
                        stats['index_length'],
                        previous_increment_rows,
                        current_increment_rows,
                        total_increment_rows,
                        previous_increment_data,
                        current_increment_data,
                        total_increment_data,
                        today_start_total,
                        today_start_data,
                        today_start_date,
                        stat_types[0],  # first stat_type wins for the main table
                        table_info['id']
                    ))
                    
                    # One history row per requested stat type.
                    for stat_type in stat_types:
                        insert_history_data.append((
                            config_id,
                            table_info['id'],
                            table_info['database_name'],
                            table_info['table_name'],
                            stat_type,
                            stats['total_rows'],
                            stats['data_length'],
                            stats['index_length'],
                            previous_increment_rows,
                            current_increment_rows,
                            total_increment_rows,
                            previous_increment_data,
                            current_increment_data,
                            total_increment_data,
                            'information_schema'
                        ))
                    
                    success_count += 1
                else:
                    # Table missing from the information_schema result set.
                    failed_count += 1
            
            # 5. Bulk-update the main table (three increments + today's baseline).
            if update_main_data:
                with get_db_cursor(autocommit=True) as cursor:
                    cursor.executemany("""
                        UPDATE starrocks_tables SET
                            latest_total_rows = %s,
                            latest_data_length = %s,
                            latest_index_length = %s,
                            previous_increment_rows = %s,
                            current_increment_rows = %s,
                            total_increment_rows = %s,
                            previous_increment_data = %s,
                            current_increment_data = %s,
                            total_increment_data = %s,
                            today_start_total_rows = %s,
                            today_start_data_length = %s,
                            today_start_date = %s,
                            last_stat_time = NOW(),
                            last_stat_type = %s,
                            updated_at = NOW()
                        WHERE id = %s
                    """, update_main_data)
            
            # 6. Bulk-insert the history rows.
            if insert_history_data:
                with get_db_cursor(autocommit=True) as cursor:
                    cursor.executemany("""
                        INSERT INTO starrocks_table_stats (
                            config_id, table_id, database_name, table_name, 
                            stat_time, stat_type, 
                            total_rows, data_length, index_length, 
                            previous_increment_rows, current_increment_rows, total_increment_rows,
                            previous_increment_data, current_increment_data, total_increment_data,
                            collection_method, collection_duration_ms
                        ) VALUES (%s, %s, %s, %s, NOW(), %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, 0)
                    """, insert_history_data)
            
            logger.info(f"✅ information_schema批量统计完成: 成功{success_count}，失败{failed_count}，更新{len(update_main_data)}条主表，插入{len(insert_history_data)}条历史")
            
        except Exception as e:
            logger.error(f"❌ information_schema批量统计失败: {e}")
            failed_count = len(tables_list)
            success_count = 0
        finally:
            # sr_conn is closed (and reset to None) inside the retry loop's
            # finally; this guards paths that raised before/outside that loop.
            if sr_conn:
                sr_conn.close()
        
        return {'success_count': success_count, 'failed_count': failed_count}
    
    def _batch_count_statistics(self, config_id: int, tables_list: List[Dict[str, Any]], stat_types: List[str]) -> Dict[str, Any]:
        """批量COUNT查询统计 - 三增量设计

        Runs COUNT(*) per table over a dedicated short-lived connection (with
        retries), computes the three increments, then bulk-updates the main
        table and bulk-inserts history rows. data/index sizes cannot be
        observed via COUNT, so the previous values are carried over.

        Returns:
            {'success_count': int, 'failed_count': int}
        """
        success_count = 0
        failed_count = 0
        
        try:
            logger.info(f"🔢 批量COUNT统计 {len(tables_list)} 个表 [config_id={config_id}]，类型: {stat_types}")
            
            # 1. Load the previous stats from the main table (three increments
            #    plus today's baseline, needed for cross-day detection).
            with get_db_cursor() as cursor:
                table_ids = [table_info['id'] for table_info in tables_list]
                if table_ids:
                    placeholders = ','.join(['%s'] * len(table_ids))
                    cursor.execute(f"""
                        SELECT id, database_name, table_name,
                               latest_total_rows, latest_data_length, latest_index_length,
                               current_increment_rows, total_increment_rows,
                               today_start_total_rows, today_start_data_length, today_start_date
                        FROM starrocks_tables
                        WHERE id IN ({placeholders})
                    """, table_ids)
                    previous_stats = {row['id']: row for row in cursor.fetchall()}
                else:
                    previous_stats = {}
            
            # 2. Resolve the StarRocks connection config.
            config = self._get_starrocks_config(config_id)
            
            # 3. Prepare the bulk update/insert payloads.
            update_main_data = []
            insert_history_data = []
            batch_size = 50  # process 50 tables per logged batch
            current_date = datetime.now().date()  # for cross-day detection
            
            for i in range(0, len(tables_list), batch_size):
                batch_tables = tables_list[i:i + batch_size]
                logger.info(f"🔢 处理批次 {i//batch_size + 1}/{(len(tables_list) + batch_size - 1)//batch_size}: {len(batch_tables)} 个表")
                
                # Each table uses its own connection so one timeout cannot
                # poison the others.
                for table_info in batch_tables:
                    prev_stat = previous_stats.get(table_info['id'], {
                        'latest_total_rows': 0, 
                        'latest_data_length': 0,
                        'latest_index_length': 0,
                        'current_increment_rows': 0,
                        'total_increment_rows': 0,
                        'today_start_total_rows': 0,
                        'today_start_data_length': 0,
                        'today_start_date': None
                    })
                    
                    db_name = table_info['database_name']
                    table_name = table_info['table_name']
                    current_total_rows = 0
                    max_retries = 3
                    count_success = False
                    
                    # Backticks cannot be bound as query parameters, so escape
                    # any embedded backtick to keep the quoted identifier
                    # well-formed (prevents identifier injection).
                    safe_db = db_name.replace('`', '``')
                    safe_table = table_name.replace('`', '``')
                    
                    for retry in range(max_retries):
                        sr_conn = None
                        try:
                            # Dedicated connection per attempt.
                            sr_conn = self._create_starrocks_connection(config)
                            cursor = sr_conn.cursor()
                            
                            if retry > 0:
                                # retry+1 so the log shows the actual attempt
                                # number (matches the information_schema mode).
                                logger.info(f"🔄 重试COUNT ({retry+1}/{max_retries}): {db_name}.{table_name}")
                            
                            # Execute the COUNT query.
                            cursor.execute(f"SELECT COUNT(*) as total_rows FROM `{safe_db}`.`{safe_table}`")
                            result = cursor.fetchone()
                            current_total_rows = result['total_rows'] if result else 0
                            cursor.close()
                            
                            count_success = True
                            break  # success: leave the retry loop
                            
                        except Exception as retry_error:
                            logger.warning(f"⚠️ COUNT失败 (尝试{retry+1}/{max_retries}): {db_name}.{table_name} - {retry_error}")
                            if retry < max_retries - 1:
                                # Back off: 30s after the first failure, then 60s.
                                wait_time = 30 if retry == 0 else 60
                                logger.info(f"⏰ 等待{wait_time}秒后重试...")
                                time.sleep(wait_time)
                        finally:
                            if sr_conn:
                                sr_conn.close()
                                sr_conn = None  # mark as closed
                    
                    # All retries failed: skip this table.
                    if not count_success:
                        logger.error(f"❌ COUNT重试{max_retries}次均失败，跳过表: {db_name}.{table_name}")
                        failed_count += 1
                        continue  # next table
                    
                    # COUNT succeeded — compute the increments.
                    
                    # Previous observation (NULL-safe via `or 0`).
                    prev_total = prev_stat['latest_total_rows'] or 0
                    prev_data = prev_stat['latest_data_length'] or 0
                    prev_index = prev_stat['latest_index_length'] or 0
                    prev_current_increment = prev_stat['current_increment_rows'] or 0
                    today_start_total = prev_stat['today_start_total_rows'] or 0
                    today_start_data = prev_stat['today_start_data_length'] or 0
                    today_start_date = prev_stat['today_start_date']
                    
                    # Day rollover: reset today's baseline to the last known values.
                    if today_start_date is None or today_start_date < current_date:
                        today_start_total = prev_total
                        today_start_data = prev_data
                        today_start_date = current_date
                    
                    # The three increments.
                    previous_increment_rows = prev_current_increment
                    current_increment_rows = current_total_rows - prev_total
                    total_increment_rows = current_total_rows - today_start_total
                    
                    # COUNT mode cannot observe data/index sizes.
                    previous_increment_data = 0
                    current_increment_data = 0
                    total_increment_data = 0
                    
                    # Main-table update row (sizes carried over from last run).
                    update_main_data.append((
                        current_total_rows,
                        prev_data,
                        prev_index,
                        previous_increment_rows,
                        current_increment_rows,
                        total_increment_rows,
                        previous_increment_data,
                        current_increment_data,
                        total_increment_data,
                        today_start_total,
                        today_start_data,
                        today_start_date,
                        stat_types[0],
                        table_info['id']
                    ))
                    
                    # One history row per requested stat type.
                    for stat_type in stat_types:
                        insert_history_data.append((
                            config_id,
                            table_info['id'],
                            db_name,
                            table_name,
                            stat_type,
                            current_total_rows,
                            prev_data,
                            prev_index,
                            previous_increment_rows,
                            current_increment_rows,
                            total_increment_rows,
                            previous_increment_data,
                            current_increment_data,
                            total_increment_data,
                            'count_query'
                        ))
                    
                    success_count += 1
            
            # 4. Bulk-update the main table (three increments + today's baseline).
            if update_main_data:
                with get_db_cursor(autocommit=True) as cursor:
                    cursor.executemany("""
                        UPDATE starrocks_tables SET
                            latest_total_rows = %s,
                            latest_data_length = %s,
                            latest_index_length = %s,
                            previous_increment_rows = %s,
                            current_increment_rows = %s,
                            total_increment_rows = %s,
                            previous_increment_data = %s,
                            current_increment_data = %s,
                            total_increment_data = %s,
                            today_start_total_rows = %s,
                            today_start_data_length = %s,
                            today_start_date = %s,
                            last_stat_time = NOW(),
                            last_stat_type = %s,
                            updated_at = NOW()
                        WHERE id = %s
                    """, update_main_data)
            
            # 5. Bulk-insert the history rows.
            if insert_history_data:
                with get_db_cursor(autocommit=True) as cursor:
                    cursor.executemany("""
                        INSERT INTO starrocks_table_stats (
                            config_id, table_id, database_name, table_name, 
                            stat_time, stat_type, 
                            total_rows, data_length, index_length, 
                            previous_increment_rows, current_increment_rows, total_increment_rows,
                            previous_increment_data, current_increment_data, total_increment_data,
                            collection_method, collection_duration_ms
                        ) VALUES (%s, %s, %s, %s, NOW(), %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, 0)
                    """, insert_history_data)
            
            logger.info(f"✅ 批量COUNT统计完成: 成功{success_count}，失败{failed_count}，更新{len(update_main_data)}条主表，插入{len(insert_history_data)}条历史")
            
        except Exception as e:
            logger.error(f"❌ 批量COUNT统计失败: {e}")
            failed_count = len(tables_list)
            success_count = 0
        
        return {'success_count': success_count, 'failed_count': failed_count}

    def sync_database_tables(self, config_id: int, database: str) -> Dict[str, Any]:
        """同步指定数据库的表结构 (sync the table list for one database).

        Pulls the table list from information_schema on the StarRocks side,
        inserts tables unknown to the management DB and refreshes the
        comment/engine/type of known ones in batched CASE WHEN updates.

        Args:
            config_id: StarRocks configuration ID.
            database: Database to sync; system databases are rejected.

        Returns:
            Summary dict with new/updated/total table counts.
        """
        sr_conn = None
        try:
            # Databases that must never be synced.
            SYSTEM_DATABASES = ['information_schema', '_statistics_', 'starrocks_audit_db__', 'sys', 'mysql', 'performance_schema']
            
            if database in SYSTEM_DATABASES:
                logger.warning(f"⚠️ 拒绝同步系统库: {database}")
                return {
                    # Consistency fix: expose a success flag like the normal
                    # return path does; False because the sync was refused.
                    'success': False,
                    'new_tables': 0,
                    'updated_tables': 0,
                    'failed_tables': 0,
                    'duration_ms': 0,
                    'message': f'系统库 {database} 不允许同步'
                }
            
            start_time = time.time()
            logger.info(f"🚀 开始同步数据库 {database} 的表结构 [config_id={config_id}]...")
            
            # Resolve the config and connect to StarRocks.
            config = self._get_starrocks_config(config_id)
            sr_conn = self._create_starrocks_connection(config)
            
            # All tables of the requested database on the StarRocks side.
            cursor = sr_conn.cursor()
            cursor.execute("""
                SELECT 
                    TABLE_SCHEMA as database_name,
                    TABLE_NAME as table_name,
                    TABLE_COMMENT,
                    TABLE_TYPE,
                    ENGINE
                FROM information_schema.TABLES 
                WHERE TABLE_SCHEMA = %s
                ORDER BY TABLE_NAME
            """, (database,))
            starrocks_tables = cursor.fetchall()
            cursor.close()
            
            # Existing rows in the management DB (filtered by config_id).
            with get_db_cursor() as cursor:
                cursor.execute("""
                    SELECT id, table_name 
                    FROM starrocks_tables 
                    WHERE config_id = %s AND database_name = %s
                """, (config_id, database))
                existing_tables = {row['table_name']: row['id'] for row in cursor.fetchall()}
            
            # Split into brand-new tables and tables to refresh.
            new_tables = []
            update_tables = []
            
            for table_info in starrocks_tables:
                table_name = table_info['table_name']
                if table_name not in existing_tables:
                    new_tables.append(table_info)
                else:
                    update_tables.append({
                        'table_id': existing_tables[table_name],
                        'table_info': table_info
                    })
            
            # Insert new tables (config_id is threaded through).
            if new_tables:
                self._batch_insert_new_tables_with_info(config_id, new_tables)
            
            # Refresh existing tables with batched CASE WHEN updates.
            if update_tables:
                batch_size = 100
                for i in range(0, len(update_tables), batch_size):
                    batch = update_tables[i:i+batch_size]
                    
                    # Build parameterized CASE WHEN branches. The previous code
                    # embedded Python repr() output in the SQL text, which is an
                    # injection hazard, breaks on quoted values, and produced
                    # invalid SQL (bare `None`) for NULL table comments.
                    comment_cases, comment_params = [], []
                    engine_cases, engine_params = [], []
                    type_cases, type_params = [], []
                    
                    for update_info in batch:
                        table_id = update_info['table_id']
                        table_info = update_info['table_info']
                        comment_cases.append("WHEN id=%s THEN %s")
                        comment_params.extend([table_id, table_info.get('TABLE_COMMENT', '')])
                        engine_cases.append("WHEN id=%s THEN %s")
                        engine_params.extend([table_id, table_info.get('ENGINE', 'StarRocks')])
                        type_cases.append("WHEN id=%s THEN %s")
                        type_params.extend([table_id, table_info.get('TABLE_TYPE', 'BASE TABLE')])
                    
                    id_placeholders = ','.join(['%s'] * len(batch))
                    id_params = [u['table_id'] for u in batch]
                    
                    sql = f"""
                        UPDATE starrocks_tables SET
                            table_comment = CASE {' '.join(comment_cases)} END,
                            engine = CASE {' '.join(engine_cases)} END,
                            table_type = CASE {' '.join(type_cases)} END,
                            last_sync_time = NOW(),
                            updated_at = NOW()
                        WHERE id IN ({id_placeholders})
                    """
                    
                    with get_db_cursor(autocommit=True) as cursor:
                        # Parameters bind in textual order: comment branches,
                        # engine branches, type branches, then the id list.
                        cursor.execute(sql, comment_params + engine_params + type_params + id_params)
            
            elapsed_seconds = round(time.time() - start_time, 2)
            
            return {
                'success': True,
                'database': database,
                'new_tables': len(new_tables),
                'updated_tables': len(update_tables),
                'total_tables': len(starrocks_tables),
                'elapsed_seconds': elapsed_seconds,
                'message': f"同步完成: 新增{len(new_tables)}表，更新{len(update_tables)}表，耗时{elapsed_seconds}秒"
            }
            
        except Exception as e:
            logger.error(f"❌ 同步数据库 {database} 失败: {e}")
            raise
        finally:
            if sr_conn:
                sr_conn.close()

    def execute_monitor_action(self, table_ids: List[int], enable: bool) -> Dict[str, Any]:
        """批量启用/禁用监控"""
        try:
            if not table_ids:
                return {
                    'success': True,
                    'processed_tables': 0,
                    'success_tables': 0,
                    'failed_tables': 0,
                    'updated_count': 0,
                    'message': "没有选择要操作的表"
                }
                
            with get_db_cursor(autocommit=True) as cursor:
                placeholders = ','.join(['%s'] * len(table_ids))
                cursor.execute(f"""
                    UPDATE starrocks_tables SET monitor_enabled = %s, updated_at = NOW()
                    WHERE id IN ({placeholders})
                """, (enable,) + tuple(table_ids))
                updated_count = cursor.rowcount
                return {
                    'success': True,
                    'processed_tables': len(table_ids),
                    'success_tables': updated_count,
                    'failed_tables': len(table_ids) - updated_count,
                    'message': f"成功{'启用' if enable else '禁用'} {updated_count} 个表的监控"
                }
        except Exception as e:
            logger.error(f"❌ 批量切换监控状态失败: {e}")
            raise

    def execute_schema_update(self, config_id: int, database: str, selected_table_ids: List[int]) -> Dict[str, Any]:
        """更新表结构（优化版本）

        Batch-refreshes structure and column metadata for all
        monitoring-enabled tables in scope, falling back to per-table
        processing when the batched path fails.

        Args:
            config_id: StarRocks configuration ID; scopes all lookups.
            database: Database-name filter, used only when selected_table_ids is empty.
            selected_table_ids: Explicit table IDs; takes precedence over database.

        Returns:
            Dict with success flag, processed/success/failed counts and
            elapsed seconds.
        """
        start_time = time.time()
        try:
            # Resolve the tables in scope (always filtered by config_id).
            with get_db_cursor() as cursor:
                if selected_table_ids:
                    placeholders = ','.join(['%s'] * len(selected_table_ids))
                    cursor.execute(f"SELECT id, database_name, table_name, monitor_enabled FROM starrocks_tables WHERE config_id = %s AND id IN ({placeholders})", [config_id] + selected_table_ids)
                elif database:
                    cursor.execute("SELECT id, database_name, table_name, monitor_enabled FROM starrocks_tables WHERE config_id = %s AND database_name = %s", (config_id, database))
                else:
                    cursor.execute("SELECT id, database_name, table_name, monitor_enabled FROM starrocks_tables WHERE config_id = %s", (config_id,))
                all_tables = cursor.fetchall()

            # Only monitoring-enabled tables are refreshed.
            tables_to_process = [t for t in all_tables if t['monitor_enabled']]
            
            if not tables_to_process:
                return {
                    'success': True,
                    'processed_tables': 0,
                    # Consistency fix: expose the same keys as the non-empty
                    # path so callers can read them unconditionally.
                    'success_tables': 0,
                    'failed_tables': 0,
                    'elapsed_seconds': 0,
                    'message': "没有启用监控的表需要更新结构"
                }

            logger.info(f"🚀 开始批量更新 {len(tables_to_process)} 个表的结构 [config_id={config_id}]")

            success_count = 0
            failed_count = 0
            
            try:
                # Batched pipeline: analyze structure, update table info,
                # then sync column metadata (config_id threaded through).
                structure_results = self._batch_analyze_table_structure(config_id, tables_to_process)
                self._batch_update_table_info(config_id, structure_results)
                self._batch_sync_table_columns(config_id, tables_to_process)
                
                success_count = len(tables_to_process)
                failed_count = 0
                
            except Exception as batch_error:
                logger.error(f"⚠️ 批量操作部分失败: {batch_error}")
                # Fall back to per-table processing to pinpoint which
                # tables actually failed.
                success_count, failed_count = self._fallback_individual_schema_update(config_id, tables_to_process)

            elapsed_seconds = round(time.time() - start_time, 2)
            processed_count = success_count + failed_count
            
            logger.info(f"✅ 批量表结构更新完成: 处理{processed_count}表，成功{success_count}，失败{failed_count}，耗时{elapsed_seconds}秒")
            
            return {
                'success': True,
                'processed_tables': processed_count,
                'success_tables': success_count,
                'failed_tables': failed_count,
                'elapsed_seconds': elapsed_seconds,
                'message': f"表结构更新完成：处理 {processed_count} 个表，成功 {success_count} 个，失败 {failed_count} 个，耗时 {elapsed_seconds} 秒"
            }
        except Exception as e:
            logger.error(f"❌ 执行表结构更新失败: {e}")
            raise

    def generate_query_statement(self, table_id: int) -> Dict[str, Any]:
        """Generate a full-column SELECT statement for a managed table.

        Args:
            table_id: Primary key of the row in ``starrocks_tables``.

        Returns:
            On success, a dict with ``query_statement``. When the table or its
            column metadata is missing, a ``({'error': ...}, 404)`` tuple —
            this Flask-style shape is kept for backward compatibility with
            existing callers.
        """
        try:
            with get_db_cursor() as cursor:
                cursor.execute("SELECT database_name, table_name FROM starrocks_tables WHERE id = %s", (table_id,))
                table_info = cursor.fetchone()
                if not table_info:
                    return {"error": "表不存在"}, 404

                database = table_info['database_name']
                table_name = table_info['table_name']

                cursor.execute("SELECT column_name FROM starrocks_table_columns WHERE table_id = %s ORDER BY ordinal_position", (table_id,))
                columns = cursor.fetchall()
                if not columns:
                    return {"error": "未找到表字段信息"}, 404

                # Backtick-quote every identifier so reserved words or unusual
                # column names cannot produce an invalid statement.
                column_list = ', '.join(f"`{col['column_name']}`" for col in columns)
                select_statement = f"SELECT {column_list} FROM `{database}`.`{table_name}` WHERE 1=1 LIMIT 100;"
                return {
                    'success': True,
                    'query_statement': select_statement,
                    'message': '查询语句已生成'
                }
        except Exception as e:
            logger.error(f"❌ 生成查询语句失败: {e}")
            raise

    def _discover_new_tables(self, config_id: int) -> Dict[str, Any]:
        """Discover tables that exist in StarRocks but are not yet registered.

        Reads information_schema.TABLES once for the whole instance, diffs it
        against the rows already tracked for this config_id, and bulk-inserts
        anything new. Returns {'new_tables': <number inserted>}.
        """
        sr_conn = None
        try:
            logger.info(f"🔍 开始从StarRocks系统表批量查询所有表信息 [config_id={config_id}]...")

            # Dedicated connection for the instance behind config_id.
            config = self._get_starrocks_config(config_id)
            sr_conn = self._create_starrocks_connection(config)

            cursor = sr_conn.cursor()
            cursor.execute("""
                SELECT 
                    TABLE_SCHEMA as database_name,
                    TABLE_NAME as table_name,
                    TABLE_COMMENT,
                    TABLE_TYPE,
                    ENGINE
                FROM information_schema.TABLES 
                WHERE TABLE_SCHEMA NOT IN ('information_schema', '_statistics_', 'starrocks_audit_db__', 'sys', 'mysql', 'performance_schema')
                ORDER BY TABLE_SCHEMA, TABLE_NAME
            """)
            all_starrocks_tables = cursor.fetchall()
            cursor.close()

            logger.info(f"📊 StarRocks中共找到 {len(all_starrocks_tables)} 个表")

            if not all_starrocks_tables:
                return {'new_tables': 0}

            # Tables already registered for this config in the management DB.
            with get_db_cursor() as cursor:
                cursor.execute("SELECT database_name, table_name FROM starrocks_tables WHERE config_id = %s", (config_id,))
                existing_tables = {(row['database_name'], row['table_name']) for row in cursor.fetchall()}

            logger.info(f"💾 管理数据库中现有 {len(existing_tables)} 个表")

            # Set difference: everything StarRocks has that we do not track yet.
            new_tables = [
                info for info in all_starrocks_tables
                if (info['database_name'], info['table_name']) not in existing_tables
            ]

            logger.info(f"🆕 发现 {len(new_tables)} 个新表需要添加")

            if not new_tables:
                return {'new_tables': 0}

            self._batch_insert_new_tables_with_info(config_id, new_tables)
            return {'new_tables': len(new_tables)}
        except Exception as e:
            logger.error(f"❌ 发现新表失败: {e}")
            raise
        finally:
            if sr_conn:
                sr_conn.close()

    def _get_starrocks_databases(self) -> List[str]:
        """Return the user databases on the default StarRocks connection.

        System schemas are filtered out of the SHOW DATABASES result.
        """
        try:
            with get_starrocks_cursor() as cursor:
                cursor.execute("SHOW DATABASES")
                rows = cursor.fetchall()
            system_dbs = {'sys', 'information_schema', 'mysql', 'performance_schema', '_statistics_'}
            names = []
            for row in rows:
                if 'Database' not in row:
                    continue
                name = row['Database']
                if name not in system_dbs:
                    names.append(name)
            return names
        except Exception as e:
            logger.error(f"❌ 获取StarRocks数据库列表失败: {e}")
            raise

    def _get_starrocks_tables_in_database(self, database: str) -> List[str]:
        """List every table name inside the given StarRocks database."""
        try:
            with get_starrocks_cursor() as cursor:
                cursor.execute(f"SHOW TABLES FROM `{database}`")
                rows = cursor.fetchall()
            # SHOW TABLES yields a single column named 'Tables_in_<database>'.
            column_key = f'Tables_in_{database}'
            return [row[column_key] for row in rows if row.get(column_key)]
        except Exception as e:
            logger.error(f"❌ 获取数据库 {database} 中的表失败: {e}")
            raise

    def _batch_insert_new_tables_with_info(self, config_id: int, new_tables: List[Dict[str, Any]]):
        """Bulk-insert newly discovered tables into the management database.

        Only the metadata already fetched from information_schema is stored;
        detailed structure (partitions, key type, column count) is filled in
        later by the schema-update flow, which keeps discovery fast.
        """
        try:
            logger.info(f"🚀 开始批量插入 {len(new_tables)} 个新表 [config_id={config_id}]...")

            rows = [
                (
                    config_id,
                    info['database_name'],
                    info['table_name'],
                    info.get('TABLE_COMMENT', ''),
                    info.get('ENGINE', 'StarRocks'),
                    info.get('TABLE_TYPE', 'BASE TABLE'),
                    0,          # is_partitioned — refined by the schema update later
                    '',         # partition_field — refined later
                    'UNKNOWN',  # key_type — refined later
                    'SSD',      # storage_medium — default
                    0,          # total_columns — refined later
                    1,          # monitor_enabled — on by default
                )
                for info in new_tables
            ]

            with get_db_cursor(autocommit=True) as cursor:
                cursor.executemany("""
                    INSERT INTO starrocks_tables (
                        config_id, database_name, table_name, table_comment, engine, table_type,
                        is_partitioned, partition_field, key_type, storage_medium, 
                        total_columns, monitor_enabled, last_sync_time, created_at, updated_at
                    ) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, NOW(), NOW(), NOW())
                """, rows)

                logger.info(f"✅ 批量插入完成，共插入 {len(new_tables)} 个表")
        except Exception as e:
            logger.error(f"❌ 批量插入新表失败: {e}")
            raise

    def _get_table_create_statement(self, config_id: int, database: str, table_name: str) -> Dict[str, Any]:
        """Fetch one table's DDL and parse key type, storage medium and partitioning.

        Thread-safe: opens its own connection for the given config. Never
        raises — on failure it returns success=False with conservative
        defaults (HDD, not partitioned, no key type).
        """
        sr_conn = None
        try:
            config = self._get_starrocks_config(config_id)
            sr_conn = self._create_starrocks_connection(config)

            cursor = sr_conn.cursor()
            cursor.execute(f"SHOW CREATE TABLE `{database}`.`{table_name}`")
            row = cursor.fetchone()
            create_stmt = list(row.values())[1] if row else ''
            cursor.close()

            # First matching key clause wins; order matters for specificity.
            key_type = next(
                (kt for kt in ('DUPLICATE KEY', 'AGGREGATE KEY', 'UNIQUE KEY', 'PRIMARY KEY')
                 if kt in create_stmt),
                None
            )

            # Storage medium defaults to HDD unless the DDL explicitly says SSD.
            ssd_markers = ('"storage_medium" = "SSD"', "'storage_medium' = 'SSD'")
            storage_medium = 'SSD' if any(marker in create_stmt for marker in ssd_markers) else 'HDD'

            is_partitioned = 'PARTITION BY' in create_stmt
            partition_field = None
            if is_partitioned:
                import re
                # Capture everything between PARTITION BY and the next clause.
                match = re.search(r'PARTITION BY (.+?)(?:DISTRIBUTED|PROPERTIES|\n)', create_stmt, re.DOTALL)
                if match:
                    partition_field = match.group(1).strip()
                    # Narrow a simple "func(column)" expression down to the column name.
                    field_match = re.search(r'[`\']?(\w+)[`\']?\s*\)', partition_field)
                    if field_match:
                        partition_field = field_match.group(1)

            return {
                'database': database,
                'table_name': table_name,
                'key_type': key_type,
                'storage_medium': storage_medium,
                'is_partitioned': is_partitioned,
                'partition_field': partition_field,
                'success': True
            }
        except Exception as e:
            logger.warning(f"⚠️ 获取 {database}.{table_name} 的CREATE语句失败: {e}")
            return {
                'database': database,
                'table_name': table_name,
                'key_type': None,
                'storage_medium': 'HDD',
                'is_partitioned': False,
                'partition_field': None,
                'success': False,
                'error': str(e)
            }
        finally:
            if sr_conn:
                sr_conn.close()

    def _batch_analyze_table_structure(self, config_id: int, tables_list: List[Dict[str, Any]]) -> Dict[str, Dict[str, Any]]:
        """Analyze the structure of many tables at once (multi-threaded).

        Three phases:
          1. two bulk information_schema queries (column stats, partition names),
          2. SHOW CREATE TABLE per table, run in parallel (up to 20 threads)
             via _get_table_create_statement,
          3. merge into a dict keyed by "<database>.<table>".

        Tables that information_schema did not return get conservative
        defaults (HDD, not partitioned, 0 columns). Bulk-query failures are
        re-raised; individual SHOW CREATE TABLE failures are tolerated.
        """
        try:
            total_tables = len(tables_list)
            logger.info(f"📊 批量分析 {total_tables} 个表的结构（多线程模式） [config_id={config_id}]")
            
            # One (schema, table) OR-condition per requested table.
            table_conditions = []
            params = []
            for table in tables_list:
                table_conditions.append("(TABLE_SCHEMA = %s AND TABLE_NAME = %s)")
                params.extend([table['database_name'], table['table_name']])
            
            where_clause = " OR ".join(table_conditions)
            
            structure_results = {}
            
            # Phase 1/3: bulk-fetch column statistics and partition info.
            logger.info("📥 步骤1/3: 批量获取字段和分区信息...")
            config = self._get_starrocks_config(config_id)
            sr_conn = self._create_starrocks_connection(config)
            try:
                cursor = sr_conn.cursor()
                # Column count and primary-key count per table, in one query.
                cursor.execute(f"""
                    SELECT 
                        TABLE_SCHEMA as database_name,
                        TABLE_NAME as table_name,
                        COUNT(*) as total_columns,
                        SUM(CASE WHEN COLUMN_KEY = 'PRI' THEN 1 ELSE 0 END) as primary_keys
                    FROM information_schema.COLUMNS 
                    WHERE {where_clause}
                    GROUP BY TABLE_SCHEMA, TABLE_NAME
                """, params)
                column_stats = cursor.fetchall()
                
                # Named partitions per table.
                cursor.execute(f"""
                    SELECT DISTINCT
                        TABLE_SCHEMA as database_name,
                        TABLE_NAME as table_name,
                        PARTITION_NAME
                    FROM information_schema.PARTITIONS
                    WHERE ({where_clause}) 
                    AND PARTITION_NAME IS NOT NULL AND PARTITION_NAME != ''
                """, params)
                partition_info = cursor.fetchall()
                cursor.close()
            finally:
                if sr_conn:
                    sr_conn.close()
            
            # Index phase-1 rows by "<db>.<table>" (first partition name kept).
            # NOTE(review): partition_dict is built but never consumed below —
            # the merge relies on the SHOW CREATE TABLE parse instead.
            partition_dict = {}
            for p in partition_info:
                key = f"{p['database_name']}.{p['table_name']}"
                if key not in partition_dict:
                    partition_dict[key] = p['PARTITION_NAME']
            
            column_stats_dict = {}
            for stats in column_stats:
                key = f"{stats['database_name']}.{stats['table_name']}"
                column_stats_dict[key] = stats
            
            # Phase 2/3: SHOW CREATE TABLE for every table, in parallel.
            logger.info(f"🚀 步骤2/3: 并行获取表结构信息（最多20个线程）...")
            create_info_dict = {}
            
            # Each worker opens its own connection, so threads do not share one.
            max_workers = min(20, total_tables)  # cap at 20 threads
            with ThreadPoolExecutor(max_workers=max_workers) as executor:
                # One task per table that phase 1 actually found.
                future_to_table = {
                    executor.submit(
                        self._get_table_create_statement, 
                        config_id,  # per-instance config for the worker's connection
                        stats['database_name'], 
                        stats['table_name']
                    ): f"{stats['database_name']}.{stats['table_name']}"
                    for stats in column_stats
                }
                
                # Collect worker results as they finish.
                completed = 0
                for future in as_completed(future_to_table):
                    table_key = future_to_table[future]
                    try:
                        result = future.result()
                        create_info_dict[table_key] = result
                        completed += 1
                        if completed % 50 == 0:
                            logger.info(f"   进度: {completed}/{total_tables} 表已分析")
                    except Exception as exc:
                        logger.error(f"⚠️ 表 {table_key} 分析出错: {exc}")
            
            logger.info(f"✅ 并行分析完成: {len(create_info_dict)}/{total_tables}")
            
            # Phase 3/3: merge phase-1 stats with the parsed DDL info.
            logger.info("🔧 步骤3/3: 组装最终结果...")
            for stats in column_stats:
                table_key = f"{stats['database_name']}.{stats['table_name']}"
                
                # DDL parse results win for key/storage/partition details.
                create_info = create_info_dict.get(table_key, {})
                key_type = create_info.get('key_type')
                storage_medium = create_info.get('storage_medium', 'HDD')
                is_partitioned = create_info.get('is_partitioned', False)  # from SHOW CREATE TABLE, not PARTITIONS
                partition_field = create_info.get('partition_field')
                
                # Fallback: infer PRIMARY KEY from the column statistics.
                if not key_type:
                    key_type = 'PRIMARY KEY' if stats.get('primary_keys', 0) > 0 else None
                
                structure_results[table_key] = {
                    'database_name': stats['database_name'],
                    'table_name': stats['table_name'],
                    'is_partitioned': is_partitioned,
                    'partition_field': partition_field,
                    'key_type': key_type,
                    'storage_medium': storage_medium,
                    'total_columns': stats['total_columns']
                }
            
            # Default entry for any requested table that phase 1 missed.
            for table in tables_list:
                table_key = f"{table['database_name']}.{table['table_name']}"
                if table_key not in structure_results:
                    logger.warning(f"⚠️ 表 {table_key} 未获取到统计信息，使用默认值")
                    structure_results[table_key] = {
                        'database_name': table['database_name'],
                        'table_name': table['table_name'],
                        'is_partitioned': False,
                        'partition_field': None,
                        'key_type': None,
                        'storage_medium': 'HDD',
                        'total_columns': 0
                    }
            
            logger.info(f"✅ 批量表结构分析完成: {len(structure_results)} 个表")
            return structure_results
            
        except Exception as e:
            logger.error(f"❌ 批量分析表结构失败: {e}")
            raise

    def _batch_update_table_info(self, config_id: int, structure_results: Dict[str, Dict[str, Any]]):
        """Bulk-update basic table info for the analyzed tables.

        Resolves every table id in one query, then updates partitioning, key
        type, storage medium and column count in batches of 100 using a single
        CASE WHEN statement per batch.

        Args:
            config_id: StarRocks instance config the tables belong to.
            structure_results: mapping "<db>.<table>" -> structure dict, as
                produced by _batch_analyze_table_structure.

        Raises:
            Re-raises any database error after logging it.
        """
        try:
            logger.info(f"📝 批量更新 {len(structure_results)} 个表的基本信息 [config_id={config_id}]")
            
            # Step 1: resolve all table ids in one query (scoped to config_id).
            table_id_map = {}
            table_conditions = []
            params = [config_id]
            for info in structure_results.values():
                table_conditions.append("(database_name = %s AND table_name = %s)")
                params.extend([info['database_name'], info['table_name']])
            
            if table_conditions:
                where_clause = " OR ".join(table_conditions)
                with get_db_cursor() as cursor:
                    cursor.execute(f"SELECT id, database_name, table_name FROM starrocks_tables WHERE config_id = %s AND ({where_clause})", params)
                    for row in cursor.fetchall():
                        table_id_map[f"{row['database_name']}.{row['table_name']}"] = row['id']
            
            # Step 2: build one value tuple per resolvable table.
            update_data = []
            for table_key, info in structure_results.items():
                if table_key in table_id_map:
                    update_data.append((
                        1 if info['is_partitioned'] else 0,
                        info['partition_field'] or None,  # empty/None -> SQL NULL
                        info['key_type'] or None,
                        info['storage_medium'],
                        info['total_columns'],
                        table_id_map[table_key]
                    ))
            
            if not update_data:
                logger.info(f"✅ 批量更新表基本信息完成: 0 个表")
                return
            
            # Step 3: batched CASE WHEN updates. All values are bound as %s
            # parameters — the previous version interpolated repr() output
            # directly into the SQL text, which breaks on values containing
            # quotes (e.g. parsed partition expressions) and is injection-prone.
            batch_size = 100
            total_updated = 0
            for i in range(0, len(update_data), batch_size):
                batch = update_data[i:i + batch_size]
                when_clause = ' '.join(['WHEN id=%s THEN %s'] * len(batch))
                id_placeholders = ','.join(['%s'] * len(batch))
                
                sql = f"""
                    UPDATE starrocks_tables SET
                        is_partitioned = CASE {when_clause} END,
                        partition_field = CASE {when_clause} END,
                        key_type = CASE {when_clause} END,
                        storage_medium = CASE {when_clause} END,
                        total_columns = CASE {when_clause} END,
                        last_sync_time = NOW(),
                        updated_at = NOW()
                    WHERE id IN ({id_placeholders})
                """
                
                # Parameter order must mirror the SET clause above:
                # for each column, (id, value) pairs for every row; then the ids.
                sql_params: List[Any] = []
                for col_index in range(5):
                    for row in batch:
                        sql_params.extend([row[5], row[col_index]])
                sql_params.extend(row[5] for row in batch)
                
                with get_db_cursor(autocommit=True) as cursor:
                    cursor.execute(sql, sql_params)
                    total_updated += len(batch)
            
            logger.info(f"✅ 批量更新表基本信息完成: {total_updated} 个表")
            
        except Exception as e:
            logger.error(f"❌ 批量更新表基本信息失败: {e}")
            raise

    def _batch_sync_table_columns(self, config_id: int, tables_list: List[Dict[str, Any]]):
        """Synchronize column metadata for many tables at once.

        Five steps: resolve table ids, delete old column rows (batched),
        bulk-read current columns from StarRocks information_schema over a
        dedicated connection, build the insert tuples, insert in batches of 500.

        NOTE(review): delete and insert run in separate autocommit
        transactions, so a failure between them leaves the affected tables
        with no column rows until the next sync.

        Raises: re-raises any database error after logging it.
        """
        sr_conn = None
        try:
            total_tables = len(tables_list)
            logger.info(f"🔄 批量同步 {total_tables} 个表的字段信息 [config_id={config_id}]")
            
            # Step 1: resolve all table ids in one query (scoped to config_id).
            table_id_map = {}
            with get_db_cursor() as cursor:
                table_conditions = []
                params = [config_id]  # config_id is the first bound parameter
                for table in tables_list:
                    table_conditions.append("(database_name = %s AND table_name = %s)")
                    params.extend([table['database_name'], table['table_name']])
                
                where_clause = " OR ".join(table_conditions)
                cursor.execute(f"SELECT id, database_name, table_name FROM starrocks_tables WHERE config_id = %s AND ({where_clause})", params)
                results = cursor.fetchall()
                for row in results:
                    table_key = f"{row['database_name']}.{row['table_name']}"
                    table_id_map[table_key] = row['id']
            
            table_ids = list(table_id_map.values())
            
            if not table_ids:
                logger.warning("⚠️ 未找到任何表ID，跳过字段同步")
                return
            
            # Step 2: delete old column rows, batched to keep the IN list short.
            logger.info(f"🗑️  删除 {len(table_ids)} 个表的旧字段...")
            with get_db_cursor(autocommit=True) as cursor:
                batch_size = 100
                for i in range(0, len(table_ids), batch_size):
                    batch_ids = table_ids[i:i+batch_size]
                    placeholders = ','.join(['%s'] * len(batch_ids))
                    cursor.execute(f"DELETE FROM starrocks_table_columns WHERE table_id IN ({placeholders})", batch_ids)
            
            # Step 3: bulk-read current column info over a per-config connection.
            logger.info("📥 从StarRocks获取字段信息...")
            table_conditions = []
            params = []
            for table in tables_list:
                table_conditions.append("(TABLE_SCHEMA = %s AND TABLE_NAME = %s)")
                params.extend([table['database_name'], table['table_name']])
            
            where_clause = " OR ".join(table_conditions)
            
            config = self._get_starrocks_config(config_id)
            sr_conn = self._create_starrocks_connection(config)
            cursor = sr_conn.cursor()
            cursor.execute(f"""
                SELECT 
                    TABLE_SCHEMA as database_name, TABLE_NAME as table_name,
                    COLUMN_NAME, ORDINAL_POSITION, COLUMN_DEFAULT, IS_NULLABLE, 
                    DATA_TYPE, COLUMN_TYPE, COLUMN_KEY, EXTRA, COLUMN_COMMENT
                FROM information_schema.COLUMNS
                WHERE {where_clause}
                ORDER BY TABLE_SCHEMA, TABLE_NAME, ORDINAL_POSITION
            """, params)
            all_columns = cursor.fetchall()
            cursor.close()
                
            # Step 4: build insert tuples; columns of unresolved tables are skipped.
            logger.info(f"🔧 准备插入 {len(all_columns)} 个字段...")
            insert_data = []
            for col in all_columns:
                table_key = f"{col['database_name']}.{col['table_name']}"
                if table_key in table_id_map:
                    insert_data.append((
                        config_id,  # scope every column row to the instance config
                        table_id_map[table_key], col['database_name'], col['table_name'],
                        col['COLUMN_NAME'], col['ORDINAL_POSITION'], col['COLUMN_DEFAULT'],
                        col['IS_NULLABLE'], col['DATA_TYPE'], col['COLUMN_TYPE'],
                        col['COLUMN_KEY'], col['EXTRA'], col['COLUMN_COMMENT']
                    ))
            
            # Step 5: insert in batches so no single statement gets too large.
            if insert_data:
                logger.info(f"💾 分批插入字段数据...")
                batch_size = 500  # rows per executemany batch
                total_batches = (len(insert_data) + batch_size - 1) // batch_size
                
                with get_db_cursor(autocommit=True) as cursor:
                    for i in range(0, len(insert_data), batch_size):
                        batch_data = insert_data[i:i+batch_size]
                        cursor.executemany("""
                            INSERT INTO starrocks_table_columns (
                                config_id, table_id, database_name, table_name, column_name, ordinal_position,
                                column_default, is_nullable, data_type, column_type, column_key,
                                extra, column_comment
                            ) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
                        """, batch_data)
                        
                        current_batch = (i // batch_size) + 1
                        if total_batches > 1:
                            logger.info(f"   进度: 批次 {current_batch}/{total_batches} 完成")
            
            logger.info(f"✅ 批量同步字段信息完成: {len(insert_data)} 个字段")
            
        except Exception as e:
            logger.error(f"❌ 批量同步字段信息失败: {e}")
            raise
        finally:
            if sr_conn:
                sr_conn.close()

    def _fallback_individual_schema_update(self, config_id: int, tables_list: List[Dict[str, Any]]) -> tuple:
        """Per-table fallback used when the bulk schema update fails.

        Processes each table independently so one bad table cannot block the
        rest. Returns (success_count, failed_count).
        """
        succeeded = 0
        failed = 0

        logger.info(f"🔄 批量操作失败，切换到逐个处理模式")

        for table_info in tables_list:
            try:
                db_name = table_info['database_name']
                tbl_name = table_info['table_name']

                # Re-analyze this table's structure on its own connection.
                structure_info = self._analyze_table_structure(config_id, db_name, tbl_name)

                with get_db_cursor(autocommit=True) as cursor:
                    cursor.execute("""
                        UPDATE starrocks_tables SET
                            is_partitioned = %s,
                            partition_field = %s,
                            key_type = %s,
                            storage_medium = %s,
                            total_columns = %s,
                            last_sync_time = NOW(),
                            updated_at = NOW()
                        WHERE id = %s
                    """, (
                        structure_info['is_partitioned'],
                        structure_info['partition_field'],
                        structure_info['key_type'],
                        structure_info['storage_medium'],
                        structure_info['total_columns'],
                        table_info['id']
                    ))

                self._sync_table_columns(config_id, table_info['id'], db_name, tbl_name)
                succeeded += 1
                logger.info(f"✅ 单独更新表结构成功: {db_name}.{tbl_name}")

            except Exception as e:
                failed += 1
                logger.error(f"❌ 单独更新表结构失败: {table_info['database_name']}.{table_info['table_name']} - {e}")

        logger.info(f"🔄 逐个处理完成: 成功{succeeded}，失败{failed}")
        return succeeded, failed

    def _analyze_table_structure(self, config_id: int, database: str, table_name: str) -> Dict[str, Any]:
        """Analyze one table's structure via SHOW CREATE TABLE and information_schema.

        Opens a dedicated connection for the given config. Never raises: on
        any error it logs and returns conservative defaults (not partitioned,
        no key type, HDD, 0 columns).
        """
        sr_conn = None
        try:
            config = self._get_starrocks_config(config_id)
            sr_conn = self._create_starrocks_connection(config)
            cursor = sr_conn.cursor()
            
            # Total column count for the table.
            cursor.execute("""
                SELECT COUNT(*) as total_columns
                FROM information_schema.COLUMNS 
                WHERE TABLE_SCHEMA = %s AND TABLE_NAME = %s
            """, (database, table_name))
            result = cursor.fetchone()
            total_columns = result['total_columns'] if result else 0

            # Partitioned if at least one named partition exists.
            cursor.execute("""
                SELECT PARTITION_NAME
                FROM information_schema.PARTITIONS
                WHERE TABLE_SCHEMA = %s AND TABLE_NAME = %s
                AND PARTITION_NAME IS NOT NULL AND PARTITION_NAME != ''
                LIMIT 1
            """, (database, table_name))
            partition_result = cursor.fetchone()
            is_partitioned = partition_result is not None
            # NOTE(review): this stores the partition *name* (e.g. 'p20240101'),
            # not the partition column; _get_table_create_statement parses the
            # actual field from the DDL instead — confirm which is intended.
            partition_field = partition_result['PARTITION_NAME'] if partition_result else None

            # SHOW CREATE TABLE yields the authoritative key type / storage medium.
            cursor.execute(f"SHOW CREATE TABLE `{database}`.`{table_name}`")
            create_table_result = cursor.fetchone()
            create_stmt = list(create_table_result.values())[1] if create_table_result else ''
            
            cursor.close()
            
            # Key model: first matching clause wins.
            key_type = None
            if 'DUPLICATE KEY' in create_stmt:
                key_type = 'DUPLICATE KEY'
            elif 'AGGREGATE KEY' in create_stmt:
                key_type = 'AGGREGATE KEY'
            elif 'UNIQUE KEY' in create_stmt:
                key_type = 'UNIQUE KEY'
            elif 'PRIMARY KEY' in create_stmt:
                key_type = 'PRIMARY KEY'
            
            # Storage medium defaults to HDD unless the DDL explicitly says SSD.
            storage_medium = 'HDD'  # default: HDD
            if '"storage_medium" = "SSD"' in create_stmt or "'storage_medium' = 'SSD'" in create_stmt:
                storage_medium = 'SSD'

            return {
                'is_partitioned': is_partitioned,
                'partition_field': partition_field,
                'key_type': key_type,
                'storage_medium': storage_medium,
                'total_columns': total_columns
            }
        except Exception as e:
            logger.error(f"❌ 分析表 {database}.{table_name} 结构失败: {e}")
            return {
                'is_partitioned': False,
                'partition_field': None,
                'key_type': None,
                'storage_medium': 'HDD',
                'total_columns': 0
            }
        finally:
            if sr_conn:
                sr_conn.close()

    def _sync_table_columns(self, config_id: int, table_id: int, database: str, table_name: str):
        """Refresh the stored column metadata for a single table.

        Deletes the table's old rows in starrocks_table_columns, reads the
        current columns from StarRocks information_schema (default
        connection), and re-inserts them in one executemany round-trip
        instead of one execute per column.

        Raises:
            Re-raises any database error after logging it.
        """
        try:
            # Drop stale column rows so the insert below is a clean replace.
            with get_db_cursor(autocommit=True) as mysql_cursor:
                mysql_cursor.execute("DELETE FROM starrocks_table_columns WHERE table_id = %s", (table_id,))

            # Current column definitions straight from StarRocks.
            with get_starrocks_cursor() as sr_cursor:
                sr_cursor.execute("""
                    SELECT 
                        COLUMN_NAME, ORDINAL_POSITION, COLUMN_DEFAULT, IS_NULLABLE, 
                        DATA_TYPE, COLUMN_TYPE, COLUMN_KEY, EXTRA, COLUMN_COMMENT
                    FROM information_schema.COLUMNS
                    WHERE TABLE_SCHEMA = %s AND TABLE_NAME = %s
                    ORDER BY ORDINAL_POSITION
                """, (database, table_name))
                columns_data = sr_cursor.fetchall()

            # Nothing to insert — skip opening another connection.
            if not columns_data:
                return

            rows = [
                (
                    config_id, table_id, database, table_name, col['COLUMN_NAME'], col['ORDINAL_POSITION'],
                    col['COLUMN_DEFAULT'], col['IS_NULLABLE'], col['DATA_TYPE'], col['COLUMN_TYPE'],
                    col['COLUMN_KEY'], col['EXTRA'], col['COLUMN_COMMENT']
                )
                for col in columns_data
            ]

            # Single batched insert instead of one round-trip per column.
            with get_db_cursor(autocommit=True) as mysql_cursor:
                mysql_cursor.executemany("""
                    INSERT INTO starrocks_table_columns (
                        config_id, table_id, database_name, table_name, column_name, ordinal_position,
                        column_default, is_nullable, data_type, column_type, column_key,
                        extra, column_comment
                    ) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
                """, rows)
        except Exception as e:
            logger.error(f"❌ 同步表 {database}.{table_name} 字段失败: {e}")
            raise


# Global service instance (module-level singleton), created at import time.
starrocks_service = StarRocksTableService()

