import logging
import numpy as np
import pandas as pd
from typing import Dict, List, Any, Optional
from datetime import datetime
import pickle
import json
from pathlib import Path
import os

logger = logging.getLogger(__name__)

class MetricDetectionResult:
    """
    Container for the metric anomaly-detection results of a single fault.

    Holds the anomaly score/label DataFrames and the per-CMDB anomaly
    intervals, and persists them to disk (CSV + JSON + pickle) under
    ``<output_dir>/metric_results`` so they can later be restored with
    :meth:`load`.
    """

    # Class-level logger keeps the class self-contained. getLogger() is
    # idempotent per name, so this is the same logger object the module
    # otherwise obtains at import time.
    _logger = logging.getLogger(__name__)

    def __init__(
        self,
        anomaly_id: str,
        anomaly_timestamp: int,
        output_dir: Optional[str] = None
    ):
        """
        Initialize a metric anomaly-detection result object.

        Args:
            anomaly_id: Unique ID of the fault.
            anomaly_timestamp: Timestamp at which the fault occurred.
            output_dir: Root directory for result output. When omitted,
                results go to ``./metric_results``.
        """
        self.anomaly_id = anomaly_id
        self.anomaly_timestamp = anomaly_timestamp

        # Ensure all results share one base output directory.
        if output_dir:
            self.output_dir: Path = Path(output_dir) / "metric_results"
        else:
            self.output_dir = Path("./metric_results")
        self.output_dir.mkdir(parents=True, exist_ok=True)

        # Detection state.
        self.detection_complete: bool = False
        # Wall-clock POSIX timestamp recorded when detection completes.
        self.detection_timestamp: Optional[float] = None

        # Detection results.
        # DataFrame: index=timestamp, columns=['cmdb_id', 'metric_name', 'entity_type', 'anomaly_score', 'is_anomaly']
        self.anomaly_scores_df: Optional[pd.DataFrame] = None
        # DataFrame: index=timestamp, columns=['cmdb_id', 'metric_name', 'entity_type', 'anomaly_label']
        self.anomaly_labels_df: Optional[pd.DataFrame] = None
        # Mapping cmdb_id -> list of anomaly-interval dicts.
        self.anomaly_intervals_by_cmdb: Dict[str, List[Dict[str, Any]]] = {}

        # Concrete storage path. Kept identical to output_dir so save() and
        # load() always operate on the same location; output_dir was just
        # created above, so no second mkdir is needed.
        self.result_storage_dir: Path = self.output_dir

    def set_detection_results(
        self,
        anomaly_scores_df: Optional[pd.DataFrame] = None,
        anomaly_intervals_by_cmdb: Optional[Dict[str, List[Dict[str, Any]]]] = None,
        anomaly_labels_df: Optional[pd.DataFrame] = None,
        complete: bool = True
    ) -> None:
        """
        Set the detection results, persisting them when detection is complete.

        Args:
            anomaly_scores_df: DataFrame with anomaly scores and labels.
            anomaly_intervals_by_cmdb: Anomaly intervals grouped by cmdb_id.
            anomaly_labels_df: DataFrame with anomaly labels.
            complete: Whether detection has finished; when True the results
                are saved to disk immediately.
        """
        if anomaly_scores_df is not None:
            self.anomaly_scores_df = anomaly_scores_df
        if anomaly_intervals_by_cmdb is not None:
            self.anomaly_intervals_by_cmdb = anomaly_intervals_by_cmdb
        if anomaly_labels_df is not None:
            self.anomaly_labels_df = anomaly_labels_df

        self.detection_complete = complete
        if complete:
            self.detection_timestamp = datetime.now().timestamp()
            self.save()
        else:
            self._logger.info(f"故障 {self.anomaly_id} 的检测结果为部分完成，暂不保存")

    def save(self) -> None:
        """Persist the detection results to disk (CSV, JSON and pickle)."""
        # Anomaly-score DataFrame.
        if self.anomaly_scores_df is not None and not self.anomaly_scores_df.empty:
            scores_file = self.result_storage_dir / "anomaly_scores.csv"
            self.anomaly_scores_df.to_csv(scores_file)
            self._logger.debug(f"异常分数已保存到 {scores_file}")

        # Anomaly-label DataFrame.
        if self.anomaly_labels_df is not None and not self.anomaly_labels_df.empty:
            labels_file = self.result_storage_dir / "anomaly_labels.csv"
            self.anomaly_labels_df.to_csv(labels_file)
            self._logger.debug(f"异常标签已保存到 {labels_file}")

        # Anomaly intervals, reorganized into a per-metric structure.
        intervals_file = self.result_storage_dir / "anomaly_intervals.json"
        reorganized_intervals = self._reorganize_intervals()
        with open(intervals_file, 'w', encoding='utf-8') as f:
            json.dump(self._serialize_dict(reorganized_intervals), f, ensure_ascii=False, indent=2)
        self._logger.debug(f"异常区间已保存到 {intervals_file}")

        # Summary info.
        summary_file = self.result_storage_dir / "summary.json"
        with open(summary_file, 'w', encoding='utf-8') as f:
            json.dump(self._serialize_dict(self.get_summary()), f, ensure_ascii=False, indent=2)
        self._logger.debug(f"检测摘要已保存到 {summary_file}")

        # Full object pickle, for fast restoration via load().
        pickle_file = self.result_storage_dir / "detection_result.pkl"
        with open(pickle_file, 'wb') as f:
            pickle.dump(self, f)

        self._logger.info(f"故障 {self.anomaly_id} 的指标检测结果已保存到 {self.result_storage_dir}")

    def _reorganize_intervals(self) -> Dict[str, List[Dict[str, Any]]]:
        """Group raw anomaly intervals by cmdb_id and metric name, sorted by start time."""
        # Pull entity_type info out of the scores DataFrame when available.
        entity_type_map: Dict[tuple, Any] = {}
        scores = self.anomaly_scores_df
        if scores is not None and not scores.empty:
            if {'cmdb_id', 'metric_name', 'entity_type'}.issubset(scores.columns):
                type_df = scores[['cmdb_id', 'metric_name', 'entity_type']].drop_duplicates()
                for _, row in type_df.iterrows():
                    entity_type_map[(row['cmdb_id'], row['metric_name'])] = row['entity_type']

        reorganized: Dict[str, List[Dict[str, Any]]] = {}
        for cmdb_id, intervals in self.anomaly_intervals_by_cmdb.items():
            # Every cmdb_id gets an entry, even if all its intervals are skipped.
            reorganized.setdefault(cmdb_id, [])

            # Group intervals by metric name; entries without one are skipped.
            metric_intervals: Dict[str, Dict[str, Any]] = {}
            for interval in intervals:
                metric_name = interval.get('metric_name')
                if not metric_name:
                    continue

                if metric_name not in metric_intervals:
                    metric_intervals[metric_name] = {
                        "metric_name": metric_name,
                        "entity_type": entity_type_map.get((cmdb_id, metric_name), "unknown"),
                        "intervals": []
                    }

                # Drop the now-redundant metric_name key from each interval.
                interval_copy = {k: v for k, v in interval.items() if k != 'metric_name'}
                metric_intervals[metric_name]['intervals'].append(interval_copy)

            # Sort each metric's intervals chronologically.
            for metric_name, metric_data in metric_intervals.items():
                metric_data['intervals'].sort(key=lambda x: x.get('start_timestamp', 0))
                reorganized[cmdb_id].append({metric_name: metric_data})

        return reorganized

    @classmethod
    def load(cls, anomaly_id: str, output_dir: str) -> Optional['MetricDetectionResult']:
        """
        Load previously saved detection results from disk.

        Args:
            anomaly_id: Fault ID (the pickle itself carries the full state).
            output_dir: Base directory where the results were stored.

        Returns:
            The restored MetricDetectionResult on success, otherwise None.
        """
        load_path_dir = Path(output_dir) / "metric_results"
        pickle_file = load_path_dir / "detection_result.pkl"

        if not pickle_file.exists():
            cls._logger.debug(f"未找到指标检测结果文件: {pickle_file}")
            return None

        # SECURITY NOTE: pickle.load must only ever be pointed at files this
        # code wrote itself — unpickling untrusted data can execute arbitrary code.
        with open(pickle_file, 'rb') as f:
            result = pickle.load(f)
        cls._logger.debug(f"已从 {pickle_file} 加载指标检测结果")

        # Re-anchor the paths relative to the directory we loaded from.
        # FIX: result_storage_dir must match the directory save() actually
        # writes to (output_dir itself). It previously pointed at an unused
        # "metric_<id>" subdirectory, so a save() after load() scattered
        # files into a location load() never reads.
        result.output_dir = load_path_dir
        result.result_storage_dir = result.output_dir

        return result

    def get_summary(self) -> Dict[str, Any]:
        """Return a summary dict: IDs, status, and anomaly-interval statistics."""
        # Count anomaly intervals and collect the affected CMDB ids.
        if self.anomaly_intervals_by_cmdb:
            affected_cmdb_ids = list(self.anomaly_intervals_by_cmdb.keys())
        else:
            affected_cmdb_ids = []
        total_intervals = sum(
            len(self.anomaly_intervals_by_cmdb[cmdb_id]) for cmdb_id in affected_cmdb_ids
        )

        return {
            'anomaly_id': self.anomaly_id,
            'anomaly_timestamp': self.anomaly_timestamp,
            'detection_complete': self.detection_complete,
            'detection_timestamp': self.detection_timestamp,
            'total_anomaly_intervals': total_intervals,
            'affected_cmdb_count': len(affected_cmdb_ids),
            'affected_cmdb_ids': affected_cmdb_ids
        }

    def _serialize_dict(self, data: Any) -> Any:
        """Recursively convert *data* into JSON-serializable Python types."""
        if data is None:
            return None

        if isinstance(data, dict):
            # JSON keys must be strings.
            return {str(k): self._serialize_dict(v) for k, v in data.items()}
        elif isinstance(data, list):
            return [self._serialize_dict(item) for item in data]
        elif isinstance(data, (datetime, pd.Timestamp)):
            return data.strftime('%Y-%m-%d %H:%M:%S')
        elif isinstance(data, np.integer):
            return int(data)
        elif isinstance(data, np.floating):
            return float(data)
        elif isinstance(data, np.ndarray):
            return data.tolist()
        elif isinstance(data, pd.DataFrame):
            return data.to_dict('records')
        elif isinstance(data, pd.Series):
            return data.to_dict()
        else:
            # Assume anything else is already JSON-compatible.
            return data
