import logging
import numpy as np
import pandas as pd
from typing import Dict, List, Tuple, Any, Optional
from datetime import datetime
import threading
from pathlib import Path
from pyod.models.ecod import ECOD

from .metric_detection_result import MetricDetectionResult


logger = logging.getLogger(__name__)
log_lock = threading.Lock()

class MetricDetector:
    """
    ECOD-based metric anomaly detector.

    Detects anomalous metrics and the time ranges over which they are
    anomalous in time-series data, and caches per-fault results on disk.
    """

    def __init__(
        self,
        contamination: float = 0.05,
        n_jobs: int = 1,
        min_anomaly_duration_points: int = 3,
        output_dir: Optional[str] = None
    ):
        """
        Initialize the metric anomaly detector.

        Args:
            contamination: Expected outlier fraction used when fitting ECOD.
            n_jobs: Number of parallel jobs for the ECOD algorithm.
            min_anomaly_duration_points: Minimum number of consecutive
                anomalous points required to report an interval.
            output_dir: Directory for detection results; defaults to ./result.
        """
        self.contamination = contamination
        self.n_jobs = n_jobs
        self.min_anomaly_duration_points = min_anomaly_duration_points
        self.output_dir = Path(output_dir) if output_dir else Path("./result")
        self.output_dir.mkdir(parents=True, exist_ok=True)

        logger.info(f"初始化指标异常检测器: contamination={contamination}, "
                   f"n_jobs={n_jobs}, min_duration={min_anomaly_duration_points}")

    def detect_anomalies(
        self,
        metrics_df: pd.DataFrame,
        anomaly_event_timestamp: int,
        anomaly_id: str
    ) -> "MetricDetectionResult":
        """
        Detect anomalies in metric data.

        Args:
            metrics_df: Metric DataFrame with timestamp, cmdb_id, metric_name,
                entity_type and value columns. The frame is NOT mutated.
            anomaly_event_timestamp: Unix timestamp (seconds) of the fault.
            anomaly_id: Unique id of the fault.

        Returns:
            A MetricDetectionResult holding per-point scores/labels and
            anomaly intervals grouped by cmdb_id.
        """
        # 1. Try to restore a cached result first.
        existing_result = MetricDetectionResult.load(anomaly_id, str(self.output_dir))
        if existing_result and existing_result.detection_complete:
            logger.info(f"从缓存恢复故障 {anomaly_id} 的指标异常检测结果")
            return existing_result

        # 2. Fresh result object for this fault.
        result = MetricDetectionResult(anomaly_id, anomaly_event_timestamp, str(self.output_dir))
        logger.info(f"开始对故障 {anomaly_id} 进行指标异常检测")

        # 3. Validate input; bail out early on empty data / missing columns.
        if not self._validate_and_preprocess_data(metrics_df, anomaly_id, result):
            return result

        # Work on a copy so the caller's DataFrame is not mutated
        # (previously the converted timestamp column was written back
        # into the caller's frame).
        metrics_df = metrics_df.copy()
        metrics_df['timestamp'] = pd.to_datetime(metrics_df['timestamp'])
        metrics_df = metrics_df.set_index('timestamp').sort_index()

        # Fault time as a tz-naive Timestamp; hoisted out of the group loop.
        fault_time = pd.to_datetime(anomaly_event_timestamp, unit='s')
        if fault_time.tzinfo is not None:
            fault_time = fault_time.replace(tzinfo=None)

        # 4. Process each (cmdb_id, metric_name, entity_type) series separately.
        grouped_metrics = metrics_df.groupby(['cmdb_id', 'metric_name', 'entity_type'])
        all_processed_dfs = []
        all_labels_dfs = []
        anomaly_intervals_by_key = {}

        # 5. Run anomaly detection for each metric group.
        for group_key, group_data in grouped_metrics:
            if len(group_data) < 10:  # skip groups with too few points
                continue

            # Train only on data observed before the fault.
            group_timestamps = np.array([ts.timestamp() for ts in group_data.index])
            train_mask = group_timestamps < anomaly_event_timestamp

            values = group_data[['value']].values
            train_values = values[train_mask]

            # Bug fix: ECOD cannot be fitted on an empty training set;
            # skip groups that have no pre-fault samples.
            if len(train_values) == 0:
                continue

            # Fit ECOD on pre-fault data, then score/label the whole series.
            detector = ECOD(contamination=self.contamination, n_jobs=self.n_jobs)
            detector.fit(train_values)

            scores = detector.decision_function(values)
            labels = detector.predict(values)

            # Per-point anomaly scores for downstream analysis.
            result_df = group_data[['cmdb_id', 'metric_name', 'entity_type']].copy()
            result_df['anomaly_score'] = scores
            result_df['is_anomaly'] = labels
            all_processed_dfs.append(result_df)

            # Per-point binary labels.
            label_df = group_data[['cmdb_id', 'metric_name', 'entity_type']].copy()
            label_df['anomaly_label'] = labels
            all_labels_dfs.append(label_df)

            # Find runs of consecutive anomalous points.
            intervals = self._find_anomaly_intervals(result_df, 'is_anomaly', self.min_anomaly_duration_points)

            # Keep only intervals that end at or after the fault time.
            # (start >= fault implies end >= fault, so one check suffices.)
            relevant_intervals = [
                interval for interval in intervals
                if interval['end_time'].replace(tzinfo=None) >= fault_time
            ]

            if relevant_intervals:
                anomaly_intervals_by_key[group_key] = relevant_intervals

        # 6. Merge per-group results.
        final_df = pd.concat(all_processed_dfs) if all_processed_dfs else pd.DataFrame()
        labels_df = pd.concat(all_labels_dfs) if all_labels_dfs else pd.DataFrame()
        organized_intervals = self._organize_intervals_by_cmdb(anomaly_intervals_by_key)

        # 7. Persist results.
        result.set_detection_results(
            anomaly_scores_df=final_df,
            anomaly_intervals_by_cmdb=organized_intervals,
            anomaly_labels_df=labels_df,
            complete=True
        )

        # 8. Log a summary (serialized so concurrent workers do not interleave).
        with log_lock:
            summary = result.get_summary()
            logger.info(f"故障 {anomaly_id} 的指标异常检测完成。总异常区间: {summary['total_anomaly_intervals']}, "
                        f"影响CMDB数量: {summary['affected_cmdb_count']}.")

        return result

    def _validate_and_preprocess_data(self, metrics_df: pd.DataFrame, anomaly_id: str,
                                     result: "MetricDetectionResult") -> bool:
        """Validate the input frame.

        On invalid data, marks `result` as complete-but-empty and returns
        False; returns True when the frame is usable.
        """
        # Empty frame: nothing to detect.
        if metrics_df.empty:
            logger.warning(f"指标数据为空 (anomaly_id: {anomaly_id})")
            result.set_detection_results(pd.DataFrame(), {}, pd.DataFrame(), complete=True)
            return False

        # 'timestamp' is checked separately to preserve its more specific
        # error message for this common failure mode.
        if 'timestamp' not in metrics_df.columns:
            logger.error(f"指标数据缺少 'timestamp' 列 (anomaly_id: {anomaly_id})")
            result.set_detection_results(pd.DataFrame(), {}, pd.DataFrame(), complete=True)
            return False

        # Remaining required columns.
        required_cols = ['cmdb_id', 'metric_name', 'value', 'entity_type']
        if not all(col in metrics_df.columns for col in required_cols):
            logger.error(f"指标数据缺少必要列 (需要: {required_cols}) (anomaly_id: {anomaly_id})")
            result.set_detection_results(pd.DataFrame(), {}, pd.DataFrame(), complete=True)
            return False

        return True

    @staticmethod
    def _build_interval(df: pd.DataFrame, start_idx: int, end_idx: int) -> Dict[str, Any]:
        """Build one interval dict from positional indices [start_idx, end_idx].

        Working with positions instead of index labels avoids
        `Index.get_loc` failures when the timestamp index contains
        duplicates, and removes the duplicated construction code the
        original had at run-end and at series-tail.
        """
        start_ts = pd.Timestamp(df.index[start_idx])
        end_ts = pd.Timestamp(df.index[end_idx])
        # Mean score over the run serves as the interval's severity.
        severity = df.iloc[start_idx:end_idx + 1]['anomaly_score'].mean()
        return {
            'start_time': start_ts,
            'end_time': end_ts,
            'start_timestamp': int(start_ts.timestamp()),
            'end_timestamp': int(end_ts.timestamp()),
            'duration_points': end_idx - start_idx + 1,
            'severity': float(severity)
        }

    def _find_anomaly_intervals(
        self,
        df: pd.DataFrame,
        anomaly_flag_column: str,
        min_duration_points: int
    ) -> List[Dict[str, Any]]:
        """
        Find runs of consecutive anomalous points in a time series.

        Args:
            df: Timestamp-indexed DataFrame carrying the anomaly flag and an
                'anomaly_score' column (used for interval severity).
            anomaly_flag_column: Column holding the 0/1 anomaly flag.
            min_duration_points: Minimum run length to report.

        Returns:
            List of interval dicts (start/end time and Unix timestamps,
            duration in points, mean severity).
        """
        intervals: List[Dict[str, Any]] = []

        if anomaly_flag_column not in df.columns or df.empty:
            return intervals

        flags = df[anomaly_flag_column].values
        run_start: Optional[int] = None  # positional index of current run start

        for i in range(len(df)):
            if flags[i] == 1:
                # Start a new run or extend the current one.
                if run_start is None:
                    run_start = i
            elif run_start is not None:
                # Run ended at i-1; keep it if it is long enough.
                if i - run_start >= min_duration_points:
                    intervals.append(self._build_interval(df, run_start, i - 1))
                run_start = None

        # Handle a run that extends to the end of the series.
        if run_start is not None and len(df) - run_start >= min_duration_points:
            intervals.append(self._build_interval(df, run_start, len(df) - 1))

        return intervals

    def _organize_intervals_by_cmdb(
        self,
        intervals_by_key: Dict[Tuple[str, str, str], List[Dict[str, Any]]]
    ) -> Dict[str, List[Dict[str, Any]]]:
        """
        Regroup intervals keyed by (cmdb_id, metric_name, entity_type) into a
        mapping keyed by cmdb_id, tagging each interval with its metric_name
        and entity_type.
        """
        organized: Dict[str, List[Dict[str, Any]]] = {}

        for (cmdb_id, metric_name, entity_type), intervals in intervals_by_key.items():
            if not intervals:
                continue

            bucket = organized.setdefault(cmdb_id, [])
            for interval in intervals:
                # Attach metric identity to each interval.
                bucket.append({
                    'metric_name': metric_name,
                    'entity_type': entity_type,
                    **interval
                })

        return organized
