"""
定时任务调度器
使用 APScheduler 实现定时数据采集
"""

import logging
import sys
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional

from apscheduler.events import EVENT_JOB_EXECUTED, EVENT_JOB_ERROR
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.triggers.cron import CronTrigger
from apscheduler.triggers.interval import IntervalTrigger

# 添加项目根目录到 Python 路径
sys.path.insert(0, str(Path(__file__).parent.parent))

from collector.skywalking_collector import SkyWalkingCollector
from database.db_manager import DatabaseManager


logger = logging.getLogger(__name__)


class CollectionScheduler:
    """Periodic SkyWalking data-collection scheduler.

    Runs :meth:`collect_data` on a fixed interval via APScheduler's
    ``BlockingScheduler``, persisting services, endpoints, traces and
    metrics through the supplied ``DatabaseManager``.
    """

    def __init__(
        self,
        skywalking_url: str,
        database_manager: "DatabaseManager",
        service_names: List[str],
        collection_interval_minutes: int = 30,
        time_range_minutes: int = 30,
        initial_time_range_minutes: Optional[int] = None
    ):
        """
        Initialize the scheduler.

        Args:
            skywalking_url: SkyWalking GraphQL API endpoint.
            database_manager: Database manager used for persistence.
            service_names: Service names to monitor. Substring matching is
                used; '*' or 'all' (or an empty list) collects every service.
            collection_interval_minutes: Interval between collection runs (minutes).
            time_range_minutes: Time window queried on each run (minutes).
            initial_time_range_minutes: Time window for the first run only
                (minutes), e.g. for historical backfill; None means use
                ``time_range_minutes``.
        """
        self.collector = SkyWalkingCollector(skywalking_url)
        self.db_manager = database_manager
        self.service_names = service_names
        self.collection_interval = collection_interval_minutes
        self.time_range = time_range_minutes
        # BUG FIX: compare against None instead of truthiness so an explicit
        # initial_time_range_minutes=0 is not silently replaced.
        self.initial_time_range = (
            initial_time_range_minutes
            if initial_time_range_minutes is not None
            else time_range_minutes
        )
        self._first_run = True  # True until the first collection run happens

        # Create the (blocking) scheduler.
        self.scheduler = BlockingScheduler()

        # Surface job outcomes through APScheduler's event listeners.
        self.scheduler.add_listener(self._job_executed, EVENT_JOB_EXECUTED)
        self.scheduler.add_listener(self._job_error, EVENT_JOB_ERROR)

    def _job_executed(self, event):
        """Event listener: a scheduled job completed successfully."""
        logger.info(f"Job executed successfully: {event.job_id}")

    def _job_error(self, event):
        """Event listener: a scheduled job raised an exception."""
        logger.error(f"Job execution failed: {event.job_id}, exception: {event.exception}")

    def collect_data(self):
        """Run one full collection pass.

        This is the entry point invoked by the scheduler (and once manually
        from :meth:`start`). It discovers services, filters them against the
        configured names, collects endpoints/metrics/traces per service, and
        records a summary row in the collection log. Exceptions are caught
        and logged so a failed run never kills the scheduler.
        """
        logger.info("=" * 80)
        # BUG FIX: datetime.utcnow() is deprecated (Python 3.12) and returns a
        # naive timestamp; use an aware UTC timestamp instead.
        logger.info(f"Starting data collection at {datetime.now(timezone.utc)}")

        # The first run may use a special (usually larger) time window.
        current_time_range = self._resolve_time_range()

        logger.info("=" * 80)

        collection_stats = {
            'services_collected': 0,
            'endpoints_collected': 0,
            'traces_collected': 0,
            'metrics_collected': 0,
            'errors': []
        }

        try:
            # 1. Discover every service known to SkyWalking.
            logger.info("Fetching all services...")
            all_services = self.collector.get_all_services(current_time_range)
            logger.info(f"Found {len(all_services)} services")

            # Show a sample of available service names for diagnostics.
            if all_services:
                logger.info(f"Available services: {[s['name'] for s in all_services[:10]]}" +
                           (f" ... and {len(all_services) - 10} more" if len(all_services) > 10 else ""))

            # 2. Filter down to the configured services (supports fuzzy match).
            services_to_collect = self._select_services(all_services)

            logger.info(f"Will collect data for {len(services_to_collect)} services: {[s['name'] for s in services_to_collect]}")

            # 3. Collect each service; one failing service must not stop the rest.
            for service in services_to_collect:
                try:
                    self._collect_service(service, collection_stats, current_time_range)
                except Exception as e:
                    error_msg = f"Failed to collect service {service['name']}: {str(e)}"
                    logger.error(error_msg)
                    collection_stats['errors'].append(error_msg)

            # 4. Persist a summary row for this run.
            status = 'success' if not collection_stats['errors'] else 'partial' if collection_stats['services_collected'] > 0 else 'failed'

            self.db_manager.insert_collection_log(
                status=status,
                message=f"Collected {collection_stats['services_collected']} services, {collection_stats['endpoints_collected']} endpoints, {collection_stats['traces_collected']} traces, {collection_stats['metrics_collected']} metrics",
                services_collected=collection_stats['services_collected'],
                endpoints_collected=collection_stats['endpoints_collected'],
                traces_collected=collection_stats['traces_collected'],
                metrics_collected=collection_stats['metrics_collected'],
                error_details={'errors': collection_stats['errors']} if collection_stats['errors'] else None
            )

            logger.info("\n" + "=" * 80)
            logger.info(f"Collection completed: {status}")
            logger.info(f"  Services: {collection_stats['services_collected']}")
            logger.info(f"  Endpoints: {collection_stats['endpoints_collected']}")
            logger.info(f"  Traces: {collection_stats['traces_collected']}")
            logger.info(f"  Metrics: {collection_stats['metrics_collected']}")
            logger.info(f"  Errors: {len(collection_stats['errors'])}")
            logger.info("=" * 80)

        except Exception as e:
            error_msg = f"Collection job failed: {str(e)}"
            logger.error(error_msg, exc_info=True)

            # Record the failed run so it is visible in the collection log.
            self.db_manager.insert_collection_log(
                status='failed',
                message=error_msg,
                error_details={'error': str(e)}
            )

    def _resolve_time_range(self) -> int:
        """Return the query window (minutes) for this run.

        The first run may use a larger window (historical backfill); every
        subsequent run uses the regular ``time_range``. Clears the first-run
        flag as a side effect.
        """
        if not self._first_run:
            return self.time_range

        self._first_run = False
        if self.initial_time_range != self.time_range:
            logger.info(f"🔄 First run: Using extended time range of {self.initial_time_range} minutes for historical data backfill")
        return self.initial_time_range

    def _select_services(self, all_services: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Filter discovered services against the configured service names.

        An empty configuration, '*', or 'all' selects everything; otherwise a
        service is selected when any configured name is a substring of (or
        equal to) its name.
        """
        if not self.service_names:
            # No names configured: collect everything.
            logger.info("No service names configured, will collect ALL services")
            return all_services

        if '*' in self.service_names or 'all' in [s.lower() for s in self.service_names]:
            logger.info("Configured to collect ALL services")
            return all_services

        # Exact match is a special case of substring match, so one test suffices.
        services_to_collect = [
            service for service in all_services
            if any(config_name in service['name'] for config_name in self.service_names)
        ]

        # Warn loudly if the configuration matched nothing.
        if not services_to_collect:
            logger.warning(f"⚠️  No services matched the configured names: {self.service_names}")
            logger.warning(f"⚠️  Please check your configuration. Available services:")
            for service in all_services[:20]:
                logger.warning(f"    - {service['name']}")
            if len(all_services) > 20:
                logger.warning(f"    ... and {len(all_services) - 20} more services")

        return services_to_collect

    def _collect_service(self, service: Dict[str, Any], stats: Dict[str, Any], time_range: int):
        """Collect endpoints, metrics and traces for a single service.

        Raises on service-level failures (handled by the caller); individual
        endpoint failures are recorded in ``stats['errors']`` and skipped.
        """
        service_id = service['id']
        service_name = service['name']

        logger.info(f"\n--- Collecting data for service: {service_name} (ID: {service_id}) ---")

        # Save or refresh the service record.
        self.db_manager.upsert_service(
            service_id=service_id,
            name=service_name
        )
        stats['services_collected'] += 1

        # Endpoints and their metrics.
        logger.info(f"  Collecting endpoints...")
        endpoints = self.collector.get_service_endpoints(service_id)
        logger.info(f"  Found {len(endpoints)} endpoints")

        for endpoint in endpoints:
            try:
                self.db_manager.upsert_endpoint(
                    service_id=service_id,
                    endpoint_id=endpoint['id'],
                    name=endpoint['name']
                )
                stats['endpoints_collected'] += 1

                self._collect_endpoint_metrics(
                    endpoint['id'],
                    endpoint['name'],
                    stats,
                    time_range
                )
            except Exception as e:
                error_msg = f"Failed to collect endpoint {endpoint['name']}: {str(e)}"
                logger.error(f"  {error_msg}")
                stats['errors'].append(error_msg)

        # Service-level metrics.
        logger.info(f"  Collecting service metrics...")
        self._collect_service_metrics(service_id, service_name, stats, time_range)

        # Traces.
        logger.info(f"  Collecting traces...")
        traces = self.collector.get_service_traces(service_id, time_range)
        logger.info(f"  Found {len(traces)} traces")

        if traces:
            traces_data = [self._trace_row(trace) for trace in traces]
            inserted = self.db_manager.insert_traces(service_id, traces_data)
            stats['traces_collected'] += inserted
            logger.info(f"  Inserted {inserted} new traces")

    @staticmethod
    def _trace_row(trace: Dict[str, Any]) -> Dict[str, Any]:
        """Map one SkyWalking trace record onto the database row shape."""
        # SkyWalking 10.x reports 'start' as a Unix timestamp in milliseconds.
        start_timestamp = int(trace['start']) / 1000  # seconds
        return {
            'trace_id': trace['traceIds'][0] if trace.get('traceIds') else trace['key'],
            'segment_id': trace['key'],
            'endpoint_names': trace.get('endpointNames', []),
            'duration': trace['duration'],
            # Stored naive but in UTC: build an aware datetime, then strip tzinfo.
            'start_time': datetime.fromtimestamp(start_timestamp, tz=timezone.utc).replace(tzinfo=None),
            'is_error': trace.get('isError', False)
        }

    def _metric_rows(self, metric_name: str, metric_data: Any) -> List[Dict[str, Any]]:
        """Convert raw metric data into insertable rows (empty list if no data)."""
        if not metric_data:
            return []
        time_series = self.collector.extract_metric_time_series(metric_data)
        if not time_series:
            return []
        return [
            {
                'metric_name': metric_name,
                'value': value,
                'timestamp': timestamp
            }
            for timestamp, value in time_series
        ]

    def _collect_service_metrics(
        self,
        service_id: str,
        service_name: str,
        stats: Dict[str, Any],
        time_range: Optional[int] = None
    ):
        """Collect the standard service-level metrics for one service.

        Args:
            service_id: SkyWalking service id (used for persistence).
            service_name: SkyWalking service name (used in the metric query).
            stats: Mutable run statistics; 'metrics_collected' and 'errors'
                are updated in place.
            time_range: Query window in minutes; None uses ``self.time_range``.
        """
        if time_range is None:
            time_range = self.time_range

        metric_names = ['service_sla', 'service_cpm', 'service_resp_time', 'service_apdex']

        for metric_name in metric_names:
            try:
                metric_data = self.collector.get_service_metric(
                    metric_name,
                    service_name,
                    time_range
                )
                rows = self._metric_rows(metric_name, metric_data)
                if rows:
                    inserted = self.db_manager.insert_service_metrics(service_id, rows)
                    stats['metrics_collected'] += inserted
                    logger.info(f"    Collected {len(rows)} values for {metric_name}")
            except Exception as e:
                error_msg = f"Failed to collect service metric {metric_name}: {str(e)}"
                logger.warning(f"    {error_msg}")
                stats['errors'].append(error_msg)

    def _collect_endpoint_metrics(
        self,
        endpoint_id: str,
        endpoint_name: str,
        stats: Dict[str, Any],
        time_range: Optional[int] = None
    ):
        """Collect the standard endpoint-level metrics for one endpoint.

        Endpoint metric failures are treated as routine: they are logged at
        debug level only and deliberately NOT appended to ``stats['errors']``.
        """
        if time_range is None:
            time_range = self.time_range

        metric_names = ['endpoint_cpm', 'endpoint_resp_time']

        for metric_name in metric_names:
            try:
                metric_data = self.collector.get_endpoint_metric(
                    metric_name,
                    endpoint_name,
                    time_range
                )
                rows = self._metric_rows(metric_name, metric_data)
                if rows:
                    inserted = self.db_manager.insert_endpoint_metrics(endpoint_id, rows)
                    stats['metrics_collected'] += inserted
            except Exception as e:
                logger.debug(f"    Failed to collect endpoint metric {metric_name}: {str(e)}")

    def start(self):
        """Register the collection job, run one immediate pass, then block."""
        # BUG FIX: the previous CronTrigger(minute=f'*/{N}') fired at fixed
        # minute marks within each hour, producing uneven gaps whenever N does
        # not divide 60 and being unable to express intervals > 59 minutes.
        # IntervalTrigger honors the configured spacing exactly.
        self.scheduler.add_job(
            self.collect_data,
            trigger=IntervalTrigger(minutes=self.collection_interval),
            id='collect_skywalking_data',
            name='Collect SkyWalking Data',
            replace_existing=True
        )

        logger.info(f"Scheduler configured to run every {self.collection_interval} minutes")
        logger.info("Starting scheduler...")

        # Run one collection immediately instead of waiting for the first tick.
        logger.info("Running initial collection...")
        self.collect_data()

        # Start the scheduler (blocks the calling thread until shutdown).
        self.scheduler.start()

    def stop(self):
        """Shut down the scheduler, waiting for running jobs to finish."""
        logger.info("Stopping scheduler...")
        self.scheduler.shutdown(wait=True)
        logger.info("Scheduler stopped")

