"""
StarRocks表管理定时任务调度器
支持定时统计数据收集，使用连接池和分批处理优化
"""

import os
import time
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from apscheduler.triggers.interval import IntervalTrigger
from apscheduler.events import EVENT_JOB_EXECUTED, EVENT_JOB_ERROR
import atexit

from ..utils.logger import FlinkLogger
from .starrocks_service import StarRocksTableService

# Module-level logger, created lazily so the application context exists
# before the first logging call (avoids touching the app at import time).
logger = None


def get_logger():
    """Return the shared module logger, initializing it on first access."""
    global logger
    if logger is not None:
        return logger
    logger = FlinkLogger.get_logger(__name__)
    return logger


class StarRocksScheduler:
    """Scheduler for periodic StarRocks table-statistics collection jobs.

    Wraps an APScheduler ``BackgroundScheduler``. Job definitions are read
    from the database when :meth:`start` is called; every enabled job then
    periodically runs statistics collection across all active StarRocks
    configs. Individual job/config failures are logged and skipped so one
    bad instance never aborts the whole run.
    """

    def __init__(self):
        # Created lazily in start(); stays None while the scheduler is stopped.
        self.scheduler = None
        self.starrocks_service = StarRocksTableService()
        self.is_running = False
        # Loaded from the database in start(), not here, so constructing the
        # object never requires a working DB connection.
        self._job_configs = None

    def _get_db_config(self, key: str, default: Any = None) -> Any:
        """Read one value from the ``starrocks_monitor_config`` table.

        Common truthy/falsy spellings ('true', '1', 'yes', 'on' / 'false',
        '0', 'no', 'off') are converted to real booleans; anything else is
        returned as the raw string. On any error (missing table, connection
        failure, ...) the supplied ``default`` is returned instead of raising.
        """
        try:
            from ..utils.mysql_db import get_db_cursor
            with get_db_cursor() as cursor:
                cursor.execute("SELECT config_value FROM starrocks_monitor_config WHERE config_key = %s", (key,))
                result = cursor.fetchone()
                if result:
                    value = result['config_value']
                    # Normalize boolean-looking strings to bool.
                    if value.lower() in ['true', '1', 'yes', 'on']:
                        return True
                    elif value.lower() in ['false', '0', 'no', 'off']:
                        return False
                    return value
                return default
        except Exception as e:
            get_logger().warning(f"读取配置 {key} 失败: {e}，使用默认值 {default}")
            return default

    def _load_job_configs(self) -> Dict[str, Dict]:
        """Build the job-config map (incremental design: hourly jobs only).

        Returns a dict keyed by job id. Each entry carries the trigger
        definition plus the kwargs later passed to the collection task.
        """
        # Pull the tunables from the database, with sensible defaults.
        hourly_enabled = self._get_db_config('hourly_collection_enabled', True)
        hourly_cron = self._get_db_config('hourly_collection_cron', '3 * * * *')  # minute 3 of every hour
        default_method = self._get_db_config('default_collection_method', 'information_schema')

        def parse_cron(cron_str):
            """Split a 5-field cron string into its named components.

            Fields beyond minute/hour default to '*'; returns None when the
            string has fewer than two fields (treated as unparseable).
            """
            parts = cron_str.strip().split()
            if len(parts) >= 2:
                return {
                    'minute': parts[0],
                    'hour': parts[1],
                    'day': parts[2] if len(parts) > 2 else '*',
                    'month': parts[3] if len(parts) > 3 else '*',
                    'day_of_week': parts[4] if len(parts) > 4 else '*'
                }
            return None

        hourly_schedule = parse_cron(hourly_cron)

        return {
            # Hourly stats: collection method comes from the DB so it can be
            # switched between fast/precise modes without a code change.
            'hourly_stats_collection': {
                'id': 'hourly_stats_collection',
                'name': f'每小时数据统计({default_method})',
                'trigger': 'cron',
                'minute': hourly_schedule['minute'] if hourly_schedule else '3',
                'hour': hourly_schedule['hour'] if hourly_schedule else '*',
                'day': hourly_schedule['day'] if hourly_schedule else '*',
                'month': hourly_schedule['month'] if hourly_schedule else '*',
                'day_of_week': hourly_schedule['day_of_week'] if hourly_schedule else '*',
                'method': default_method,
                'stat_type': 'hourly',  # hourly incremental statistics
                'database': '',  # empty string = all databases
                'enabled': hourly_enabled,
                'description': f'每小时统计（{default_method}模式）: {hourly_cron}'
            },

            # Optional fallback: every-6-hours full stats (disabled by default).
            'six_hourly_stats': {
                'id': 'six_hourly_stats',
                'name': '每6小时统计(information_schema)',
                'trigger': 'interval',
                'hours': 6,
                'method': 'information_schema',
                'stat_type': 'hourly',  # same stat_type as the hourly job
                'database': '',  # empty string = all databases
                'enabled': False,
                'description': '每6小时执行一次全量统计，适合备用监控'
            },

        }

    def start(self):
        """Load configs from the DB, create the scheduler and start it.

        Idempotent: a second call while running only logs a warning.
        Raises whatever the scheduler setup raises, after logging it.
        """
        if self.is_running:
            get_logger().warning("调度器已经在运行中")
            return

        try:
            # Reload configs on every start so DB edits take effect.
            self._job_configs = self._load_job_configs()
            get_logger().info(f"📝 已从数据库加载配置: {[k for k, v in self._job_configs.items() if v.get('enabled')]}")

            self.scheduler = BackgroundScheduler(
                timezone='Asia/Shanghai',
                job_defaults={
                    'coalesce': False,       # do not merge missed runs
                    'max_instances': 1,      # at most one concurrent run per job
                    'misfire_grace_time': 30  # seconds of grace for late runs
                }
            )

            # Observe job completions/failures for logging.
            self.scheduler.add_listener(self._job_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)

            # Register every enabled job.
            self._add_jobs()

            self.scheduler.start()
            self.is_running = True

            # Make sure background threads are stopped at interpreter exit.
            atexit.register(self.shutdown)

            get_logger().info("📅 StarRocks定时任务调度器启动成功")
            self._log_scheduled_jobs()

        except Exception as e:
            get_logger().error(f"❌ 调度器启动失败: {e}")
            raise

    def shutdown(self):
        """Stop the scheduler if it is running; safe to call repeatedly."""
        if self.scheduler and self.is_running:
            self.scheduler.shutdown()
            self.is_running = False
            get_logger().info("📅 StarRocks定时任务调度器已关闭")

    def _add_jobs(self):
        """Register every job whose config is marked enabled."""
        for job_id, config in self._job_configs.items():
            if config.get('enabled', False):
                self._add_single_job(job_id, config)

    def _add_single_job(self, job_id: str, config: Dict):
        """Register one job on the scheduler from its config dict.

        Supports 'cron' and 'interval' triggers; anything else is logged and
        skipped. Registration errors are logged, never raised.
        """
        try:
            if config['trigger'] == 'cron':
                # Pass through all five cron fields. Previously day/month/
                # day_of_week were dropped here, so a schedule like
                # "0 2 1 * *" would fire every day instead of monthly.
                trigger = CronTrigger(
                    minute=config.get('minute', 0),
                    hour=config.get('hour'),
                    day=config.get('day', '*'),
                    month=config.get('month', '*'),
                    day_of_week=config.get('day_of_week', '*'),
                    second=config.get('second', 0),
                    timezone='Asia/Shanghai'
                )
            elif config['trigger'] == 'interval':
                trigger = IntervalTrigger(
                    hours=config.get('hours', 0),
                    minutes=config.get('minutes', 0),
                    seconds=config.get('seconds', 0),
                    timezone='Asia/Shanghai'
                )
            else:
                get_logger().error(f"❌ 不支持的触发器类型: {config['trigger']}")
                return

            # All jobs share one entry point; the config is passed as kwargs.
            self.scheduler.add_job(
                func=self._execute_stats_collection,
                trigger=trigger,
                id=job_id,
                name=config['name'],
                kwargs={
                    'job_id': job_id,
                    'method': config['method'],
                    'stat_type': config.get('stat_type', 'manual'),
                    'database': config.get('database', ''),
                    'description': config.get('description', '')
                }
            )

            get_logger().info(f"✅ 添加定时任务: {config['name']} (ID: {job_id})")

        except Exception as e:
            get_logger().error(f"❌ 添加任务失败 {job_id}: {e}")

    def _get_all_starrocks_configs(self) -> List[Dict]:
        """Return all active, non-deleted StarRocks service configs.

        Returns an empty list (and logs) on any database error.
        """
        try:
            from ..utils.mysql_db import get_db_cursor
            with get_db_cursor() as cursor:
                cursor.execute("""
                    SELECT id, config_name, host, port 
                    FROM service_config 
                    WHERE service_type = 'starrocks' 
                    AND is_deleted = 0 
                    AND is_active = 1
                    ORDER BY id
                """)
                configs = cursor.fetchall()
                get_logger().info(f"📋 获取到 {len(configs)} 个活跃的StarRocks配置")
                return configs
        except Exception as e:
            get_logger().error(f"❌ 获取StarRocks配置失败: {e}")
            return []

    def _execute_stats_collection(self, job_id: str, method: str, stat_type: str = 'manual', database: str = '', description: str = ''):
        """Run one statistics-collection pass over every StarRocks config.

        Iterates all active configs, delegating to the service layer per
        config; a failing config is logged and skipped so the others still
        run. Aggregated results are logged and persisted via
        :meth:`_save_job_result`.

        Args:
            job_id: id of the triggering job (also used in history records).
            method: collection method, e.g. 'information_schema'.
            stat_type: statistics category ('hourly', 'manual', ...).
            database: target database; empty string means all databases.
            description: human-readable description for the logs.
        """
        start_time = time.time()
        get_logger().info(f"🚀 开始执行定时任务: {job_id} - {description}")
        get_logger().info(f"📊 统计方法: {method}, 类型: {stat_type}, 数据库: {database or '全部数据库'}")

        starrocks_configs = self._get_all_starrocks_configs()

        if not starrocks_configs:
            get_logger().warning(f"⚠️ 没有找到活跃的StarRocks配置，跳过任务: {job_id}")
            return

        get_logger().info(f"🔄 将为 {len(starrocks_configs)} 个StarRocks实例执行统计任务")

        # Aggregated counters across every config.
        total_results = {
            'processed_tables': 0,
            'success_tables': 0,
            'failed_tables': 0,
            'configs_processed': 0,
            'configs_failed': 0
        }

        for config in starrocks_configs:
            config_id = config['id']
            config_name = config['config_name']
            config_host = f"{config['host']}:{config['port']}"

            try:
                get_logger().info(f"📍 开始处理配置: {config_name} ({config_host}) [config_id={config_id}]")

                result = self.starrocks_service.execute_data_statistics(
                    config_id=config_id,
                    database=database,
                    collection_method=method,
                    selected_table_ids=[],  # empty list means all tables
                    stat_type=stat_type
                )

                total_results['processed_tables'] += result.get('processed_tables', 0)
                total_results['success_tables'] += result.get('success_tables', 0)
                total_results['failed_tables'] += result.get('failed_tables', 0)
                total_results['configs_processed'] += 1

                get_logger().info(f"✅ 配置处理完成: {config_name} - "
                           f"处理{result.get('processed_tables', 0)}表, "
                           f"成功{result.get('success_tables', 0)}个, "
                           f"失败{result.get('failed_tables', 0)}个")

            except Exception as e:
                total_results['configs_failed'] += 1
                get_logger().error(f"❌ 配置处理失败: {config_name} ({config_host}) - {e}")
                # Keep going: one bad config must not abort the whole task.
                continue

        elapsed_time = time.time() - start_time

        # Summary banner in the log.
        get_logger().info(f"{'='*60}")
        get_logger().info(f"✅ 定时任务完成: {job_id}")
        get_logger().info(f"🌐 配置统计: 处理{total_results['configs_processed']}个, "
                   f"失败{total_results['configs_failed']}个")
        get_logger().info(f"📈 表统计: 处理{total_results['processed_tables']}表, "
                   f"成功{total_results['success_tables']}个, "
                   f"失败{total_results['failed_tables']}个")
        get_logger().info(f"⏱️  总执行时间: {elapsed_time:.2f}秒")
        get_logger().info(f"{'='*60}")

        # A run counts as successful only if every config was processed.
        success = total_results['configs_failed'] == 0
        self._save_job_result(job_id, success, total_results, elapsed_time)

    def _save_job_result(self, job_id: str, success: bool, result: Dict, elapsed_time: float):
        """Persist one execution record to ``scheduled_job_history``.

        Best-effort: failures to write history are logged as warnings and
        never propagate, so they cannot break the main task.
        """
        try:
            from ..utils.mysql_db import get_db_cursor

            with get_db_cursor(autocommit=True) as cursor:
                cursor.execute("""
                    INSERT INTO scheduled_job_history 
                    (job_id, success, processed_tables, success_tables, failed_tables, 
                     configs_processed, configs_failed, elapsed_seconds, error_message, executed_at)
                    VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
                """, (
                    job_id,
                    success,
                    result.get('processed_tables', 0),
                    result.get('success_tables', 0),
                    result.get('failed_tables', 0),
                    result.get('configs_processed', 0),
                    result.get('configs_failed', 0),
                    elapsed_time,
                    result.get('error', '') if not success else '',
                    datetime.now()
                ))

        except Exception as e:
            get_logger().warning(f"⚠️ 保存任务结果失败: {e}")

    def _job_listener(self, event):
        """Log the outcome (success or exception) of every executed job."""
        job = self.scheduler.get_job(event.job_id)
        # The job may have been removed (e.g. disabled) by the time the
        # event fires; fall back to the id so the listener never crashes.
        job_name = job.name if job else event.job_id
        if event.exception:
            get_logger().error(f"❌ 任务执行异常: {job_name} - {event.exception}")
        else:
            get_logger().info(f"✅ 任务执行成功: {job_name}")

    def _log_scheduled_jobs(self):
        """Log every scheduled job together with its next fire time."""
        jobs = self.scheduler.get_jobs()
        if jobs:
            get_logger().info(f"📋 已调度任务列表 ({len(jobs)}个):")
            for job in jobs:
                next_run = job.next_run_time.strftime('%Y-%m-%d %H:%M:%S') if job.next_run_time else 'N/A'
                get_logger().info(f"  • {job.name} (ID: {job.id}) - 下次执行: {next_run}")
        else:
            get_logger().info("📋 当前没有已调度的任务")

    def get_job_status(self) -> Dict[str, Any]:
        """Return scheduler state plus a summary of every scheduled job."""
        if not self.scheduler or not self.is_running:
            return {
                'scheduler_running': False,
                'jobs': []
            }

        jobs = []
        for job in self.scheduler.get_jobs():
            jobs.append({
                'id': job.id,
                'name': job.name,
                'next_run_time': job.next_run_time.isoformat() if job.next_run_time else None,
                'trigger': str(job.trigger),
                'enabled': True
            })

        return {
            'scheduler_running': self.is_running,
            'jobs': jobs,
            'total_jobs': len(jobs)
        }

    def enable_job(self, job_id: str) -> bool:
        """Enable a known job, scheduling it immediately when running.

        Returns True on success, False for unknown ids or on error.
        """
        if job_id not in self._job_configs:
            return False

        try:
            config = self._job_configs[job_id]
            config['enabled'] = True

            # If the scheduler is live, register the job right away.
            if self.is_running:
                self._add_single_job(job_id, config)

            get_logger().info(f"✅ 启用定时任务: {config['name']}")
            return True

        except Exception as e:
            get_logger().error(f"❌ 启用任务失败 {job_id}: {e}")
            return False

    def disable_job(self, job_id: str) -> bool:
        """Disable a known job, unscheduling it immediately when running.

        Returns True on success, False for unknown ids or on error.
        """
        if job_id not in self._job_configs:
            return False

        try:
            config = self._job_configs[job_id]
            config['enabled'] = False

            # If the scheduler is live and the job is registered, remove it.
            if self.is_running and self.scheduler.get_job(job_id):
                self.scheduler.remove_job(job_id)

            get_logger().info(f"🚫 禁用定时任务: {config['name']}")
            return True

        except Exception as e:
            get_logger().error(f"❌ 禁用任务失败 {job_id}: {e}")
            return False

    def trigger_job_now(self, job_id: str) -> bool:
        """Run a known job synchronously, right now, in the caller's thread.

        Returns True on success; False when the scheduler is stopped, the id
        is unknown, or execution raised.
        """
        if not self.is_running or job_id not in self._job_configs:
            return False

        try:
            config = self._job_configs[job_id]
            get_logger().info(f"🚀 手动触发任务: {config['name']}")

            # Bypass the scheduler and invoke the task function directly.
            self._execute_stats_collection(
                job_id=job_id,
                method=config['method'],
                stat_type=config.get('stat_type', 'manual'),
                database=config.get('database', ''),
                description=f"手动触发 - {config.get('description', '')}"
            )
            return True

        except Exception as e:
            get_logger().error(f"❌ 手动触发任务失败 {job_id}: {e}")
            return False


# Process-wide singleton scheduler instance.
scheduler_instance = None


def get_scheduler() -> StarRocksScheduler:
    """Return the process-wide scheduler, creating it on first call."""
    global scheduler_instance
    if scheduler_instance is not None:
        return scheduler_instance
    scheduler_instance = StarRocksScheduler()
    return scheduler_instance


def start_scheduler():
    """Start the global scheduler if not already running, and return it."""
    sched = get_scheduler()
    if not sched.is_running:
        sched.start()
    return sched


def stop_scheduler():
    """Shut down and discard the global scheduler instance, if one exists."""
    global scheduler_instance
    if not scheduler_instance:
        return
    scheduler_instance.shutdown()
    scheduler_instance = None
