"""实时分析器

提供实时数据分析和监控功能。
"""

import logging
import threading
import time
from typing import Dict, List, Any, Optional, Callable
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
from collections import deque, defaultdict
import json

class RealTimeAnalyzer:
    """Real-time data analyzer.

    Maintains a bounded sliding window of incoming records and, on a
    background thread, periodically runs a suite of analyses (data quality,
    statistics, trends, anomalies, patterns), firing registered callbacks and
    raising alerts when configured thresholds are exceeded.

    NOTE(review): the window deque is appended to by producers while the
    analysis thread snapshots it. Individual deque operations are atomic
    under CPython, but there is no explicit lock — confirm that is acceptable
    for the intended producers.
    """

    def __init__(self, window_size: int = 100, update_interval: float = 1.0):
        """Initialize the real-time analyzer.

        Args:
            window_size: Maximum number of records kept in the sliding window.
            update_interval: Seconds between analysis passes.
        """
        self.logger = logging.getLogger(__name__)
        self.window_size = window_size
        self.update_interval = update_interval

        # Sliding window of raw records; oldest entries are evicted.
        self.data_window = deque(maxlen=window_size)

        # Cache of the most recent analysis results.
        self.analysis_cache = {}
        self.last_analysis_time = datetime.now()

        # Monitoring metrics: each metric keeps its own bounded history.
        self.metrics = defaultdict(lambda: deque(maxlen=window_size))
        self.alerts = deque(maxlen=50)

        # Threshold configuration.
        self.thresholds = {
            'anomaly_threshold': 2.0,  # outlier threshold (multiples of std)
            'trend_threshold': 0.05,   # minimum slope magnitude to count as a trend
            'variance_threshold': 1.5,  # variance-change threshold (not read anywhere in this class)
            'missing_data_threshold': 0.1  # missing-data ratio threshold (fraction, compared as percent)
        }

        # Runtime state of the background analysis thread.
        self.is_running = False
        self.analysis_thread = None

        # User-registered callbacks.
        self.alert_callbacks = []
        self.update_callbacks = []

    def start(self):
        """Start the background analysis thread (no-op if already running)."""
        if self.is_running:
            self.logger.warning("实时分析器已在运行")
            return

        self.is_running = True
        # Daemon thread so a forgotten stop() does not block interpreter exit.
        self.analysis_thread = threading.Thread(target=self._analysis_loop)
        self.analysis_thread.daemon = True
        self.analysis_thread.start()

        self.logger.info("实时分析器已启动")

    def stop(self):
        """Stop the background analysis thread, waiting up to 5 seconds."""
        self.is_running = False

        if self.analysis_thread:
            self.analysis_thread.join(timeout=5)

        self.logger.info("实时分析器已停止")

    def add_data(self, data: Dict[str, Any]):
        """Append one record to the sliding window and update metrics.

        Args:
            data: Data record; a 'timestamp' field (ISO string) is added
                when missing. The dict is mutated in place in that case.
        """
        try:
            # Stamp arrival time when the producer did not provide one.
            if 'timestamp' not in data:
                data['timestamp'] = datetime.now().isoformat()

            self.data_window.append(data)

            # Keep per-field monitoring metrics in step with the window.
            self._update_metrics(data)

        except Exception as e:
            self.logger.error(f"添加数据失败: {e}")

    def add_alert_callback(self, callback: Callable[[Dict[str, Any]], None]):
        """Register a callback invoked once per generated alert dict.

        Args:
            callback: Alert callback function.
        """
        self.alert_callbacks.append(callback)

    def add_update_callback(self, callback: Callable[[Dict[str, Any]], None]):
        """Register a callback invoked with each new analysis-results dict.

        Args:
            callback: Update callback function.
        """
        self.update_callbacks.append(callback)

    def _analysis_loop(self):
        """Background loop: analyze, alert, notify, sleep, repeat."""
        while self.is_running:
            try:
                if len(self.data_window) > 0:
                    # Run the full analysis suite over the current window.
                    analysis_results = self._perform_analysis()

                    # Derive alerts from the fresh results.
                    alerts = self._detect_alerts(analysis_results)

                    # Publish results before invoking callbacks so callbacks
                    # observe a consistent cache.
                    self.analysis_cache = analysis_results
                    self.last_analysis_time = datetime.now()

                    # A failing callback must not take down the loop.
                    for callback in self.update_callbacks:
                        try:
                            callback(analysis_results)
                        except Exception as e:
                            self.logger.error(f"更新回调失败: {e}")

                    for alert in alerts:
                        self.alerts.append(alert)
                        for callback in self.alert_callbacks:
                            try:
                                callback(alert)
                            except Exception as e:
                                self.logger.error(f"警报回调失败: {e}")

                time.sleep(self.update_interval)

            except Exception as e:
                self.logger.error(f"分析循环错误: {e}")
                time.sleep(self.update_interval)

    def _perform_analysis(self) -> Dict[str, Any]:
        """Run all analyses over a snapshot of the current window.

        Returns:
            Analysis-results dict, or {'error': ...} on failure.
        """
        try:
            # Snapshot the deque so producers can keep appending meanwhile.
            df = pd.DataFrame(list(self.data_window))

            if df.empty:
                return {'error': '没有数据'}

            analysis_results = {
                'timestamp': datetime.now().isoformat(),
                'window_size': len(df),
                'data_quality': self._analyze_data_quality(df),
                'statistical_summary': self._statistical_summary(df),
                'trend_analysis': self._trend_analysis(df),
                'anomaly_detection': self._anomaly_detection(df),
                'pattern_detection': self._pattern_detection(df)
            }

            return analysis_results

        except Exception as e:
            self.logger.error(f"执行分析失败: {e}")
            return {'error': str(e)}

    def _analyze_data_quality(self, df: pd.DataFrame) -> Dict[str, Any]:
        """Assess completeness, duplication and dtype mix of the window.

        Args:
            df: Snapshot of the sliding window.

        Returns:
            Data-quality report, or {'error': ...} on failure.
        """
        try:
            total_cells = df.size
            # FIX: cast numpy scalars to native Python types throughout —
            # np.int64 values are not JSON-serializable and previously made
            # export_analysis_history() raise TypeError.
            missing_cells = int(df.isnull().sum().sum())
            missing_percentage = missing_cells / total_cells * 100 if total_cells > 0 else 0

            # Per-column missing counts as plain ints.
            column_missing = {col: int(n) for col, n in df.isnull().sum().items()}

            # Distribution of column dtypes (e.g. {'float64': 2, 'object': 1}).
            dtype_distribution = df.dtypes.value_counts().to_dict()

            duplicate_rows = int(df.duplicated().sum())

            return {
                'total_rows': len(df),
                'total_columns': len(df.columns),
                'missing_percentage': missing_percentage,
                'column_missing': column_missing,
                'duplicate_rows': duplicate_rows,
                'dtype_distribution': {str(k): int(v) for k, v in dtype_distribution.items()},
                # Heuristic 0-100 score penalizing missing cells and duplicates.
                'quality_score': max(0, 100 - missing_percentage - (duplicate_rows / len(df) * 10))
            }

        except Exception as e:
            self.logger.error(f"数据质量分析失败: {e}")
            return {'error': str(e)}

    def _statistical_summary(self, df: pd.DataFrame) -> Dict[str, Any]:
        """Compute per-column descriptive statistics for numeric columns.

        Args:
            df: Snapshot of the sliding window.

        Returns:
            Mapping of column name to statistics, or a message / error dict.
        """
        try:
            numeric_columns = df.select_dtypes(include=[np.number]).columns

            if len(numeric_columns) == 0:
                return {'message': '没有数值型列'}

            summary = {}

            for column in numeric_columns:
                series = df[column].dropna()
                if len(series) > 0:
                    summary[column] = {
                        'mean': float(series.mean()),
                        'median': float(series.median()),
                        'std': float(series.std()),
                        'min': float(series.min()),
                        'max': float(series.max()),
                        'count': int(len(series)),
                        # Higher moments are undefined for a single sample.
                        'skewness': float(series.skew()) if len(series) > 1 else 0,
                        'kurtosis': float(series.kurtosis()) if len(series) > 1 else 0
                    }

            return summary

        except Exception as e:
            self.logger.error(f"统计摘要失败: {e}")
            return {'error': str(e)}

    def _trend_analysis(self, df: pd.DataFrame) -> Dict[str, Any]:
        """Fit a linear trend to each numeric column over window position.

        Args:
            df: Snapshot of the sliding window.

        Returns:
            Mapping of column name to trend info, or {'error': ...}.
        """
        try:
            numeric_columns = df.select_dtypes(include=[np.number]).columns
            trends = {}

            for column in numeric_columns:
                series = df[column].dropna()
                if len(series) > 2:
                    # Least-squares slope against sample index (not time).
                    x = np.arange(len(series))
                    slope = np.polyfit(x, series, 1)[0]

                    # Classify direction; NOTE the threshold compares the raw
                    # slope, so it is scale-dependent on the column's units.
                    if abs(slope) < self.thresholds['trend_threshold']:
                        direction = 'stable'
                    elif slope > 0:
                        direction = 'increasing'
                    else:
                        direction = 'decreasing'

                    # Pearson correlation with the index gauges trend strength.
                    correlation = np.corrcoef(x, series)[0, 1] if len(series) > 1 else 0

                    trends[column] = {
                        'slope': float(slope),
                        'direction': direction,
                        'strength': abs(correlation),
                        'correlation': float(correlation)
                    }

            return trends

        except Exception as e:
            self.logger.error(f"趋势分析失败: {e}")
            return {'error': str(e)}

    def _anomaly_detection(self, df: pd.DataFrame) -> Dict[str, Any]:
        """Flag outliers in numeric columns via z-score and IQR methods.

        Args:
            df: Snapshot of the sliding window.

        Returns:
            Mapping of column name to outlier info, or {'error': ...}.
        """
        try:
            numeric_columns = df.select_dtypes(include=[np.number]).columns
            anomalies = {}

            for column in numeric_columns:
                series = df[column].dropna()
                if len(series) > 3:
                    # Z-score method. FIX: guard the constant-series case —
                    # dividing by a zero std produced NaN z-scores and a
                    # runtime warning; the outcome (no outliers) is the same.
                    std = series.std()
                    if std > 0:
                        z_scores = np.abs((series - series.mean()) / std)
                        outliers = series[z_scores > self.thresholds['anomaly_threshold']]
                    else:
                        outliers = series.iloc[0:0]

                    # IQR (Tukey fence) method.
                    Q1 = series.quantile(0.25)
                    Q3 = series.quantile(0.75)
                    IQR = Q3 - Q1
                    lower_bound = Q1 - 1.5 * IQR
                    upper_bound = Q3 + 1.5 * IQR
                    iqr_outliers = series[(series < lower_bound) | (series > upper_bound)]

                    anomalies[column] = {
                        'zscore_outliers': len(outliers),
                        'iqr_outliers': len(iqr_outliers),
                        'outlier_percentage': len(outliers) / len(series) * 100,
                        'outlier_values': outliers.tolist()[:10],  # cap at 10 examples
                        'bounds': {
                            'lower': float(lower_bound),
                            'upper': float(upper_bound)
                        }
                    }

            return anomalies

        except Exception as e:
            self.logger.error(f"异常检测失败: {e}")
            return {'error': str(e)}

    def _pattern_detection(self, df: pd.DataFrame) -> Dict[str, Any]:
        """Detect periodicity in numeric columns and categorical distributions.

        Args:
            df: Snapshot of the sliding window.

        Returns:
            Mapping of pattern name to details, or {'error': ...}.
        """
        try:
            patterns = {}

            # Periodicity via lagged autocorrelation (numeric columns only).
            numeric_columns = df.select_dtypes(include=[np.number]).columns

            for column in numeric_columns:
                series = df[column].dropna()
                if len(series) > 10:
                    autocorr = []
                    # Lags beyond a quarter of the window are too noisy.
                    max_lag = min(len(series) // 4, 20)

                    for lag in range(1, max_lag):
                        if lag < len(series):
                            corr = np.corrcoef(series[:-lag], series[lag:])[0, 1]
                            autocorr.append(corr if not np.isnan(corr) else 0)

                    if autocorr:
                        max_autocorr = max(autocorr)
                        best_lag = autocorr.index(max_autocorr) + 1

                        patterns[column] = {
                            'max_autocorrelation': float(max_autocorr),
                            'best_lag': int(best_lag),
                            # FIX: cast np.bool_ to bool so the value is
                            # JSON-serializable in export_analysis_history().
                            'is_periodic': bool(max_autocorr > 0.5),
                            'autocorrelations': [float(c) for c in autocorr[:10]]  # first 10
                        }

            # Value distributions for categorical (object-dtype) columns.
            categorical_columns = df.select_dtypes(include=['object']).columns

            for column in categorical_columns:
                value_counts = df[column].value_counts()
                if len(value_counts) > 0:
                    # FIX: normalize by the number of observed (non-null)
                    # values, not len(df) — with NaNs present the old
                    # probabilities did not sum to 1 and entropy was biased.
                    observed = int(value_counts.sum())
                    probabilities = value_counts / observed
                    patterns[f'{column}_distribution'] = {
                        'most_common': value_counts.index[0],
                        'most_common_count': int(value_counts.iloc[0]),
                        'unique_values': len(value_counts),
                        'entropy': float(-sum(p * np.log2(p) for p in probabilities if p > 0))
                    }

            return patterns

        except Exception as e:
            self.logger.error(f"模式检测失败: {e}")
            return {'error': str(e)}

    def _detect_alerts(self, analysis_results: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Derive alert dicts from an analysis-results dict.

        Args:
            analysis_results: Output of _perform_analysis().

        Returns:
            List of alert dicts (possibly empty).
        """
        alerts = []

        try:
            # Data-quality alert: too much missing data.
            data_quality = analysis_results.get('data_quality', {})
            missing_percentage = data_quality.get('missing_percentage', 0)

            # Threshold is stored as a fraction; the report value is a percent.
            if missing_percentage > self.thresholds['missing_data_threshold'] * 100:
                alerts.append({
                    'type': 'data_quality',
                    'severity': 'warning',
                    'message': f'缺失数据过多: {missing_percentage:.1f}%',
                    'timestamp': datetime.now().isoformat(),
                    'details': {'missing_percentage': missing_percentage}
                })

            # Anomaly alert: more than 10% of a column's values are outliers.
            anomalies = analysis_results.get('anomaly_detection', {})
            for column, anomaly_info in anomalies.items():
                if isinstance(anomaly_info, dict):
                    outlier_percentage = anomaly_info.get('outlier_percentage', 0)
                    if outlier_percentage > 10:
                        alerts.append({
                            'type': 'anomaly',
                            'severity': 'warning',
                            'message': f'列 {column} 异常值过多: {outlier_percentage:.1f}%',
                            'timestamp': datetime.now().isoformat(),
                            'details': {'column': column, 'outlier_percentage': outlier_percentage}
                        })

            # Trend alert: slope exceeds 10x the trend threshold.
            trends = analysis_results.get('trend_analysis', {})
            for column, trend_info in trends.items():
                if isinstance(trend_info, dict):
                    slope = trend_info.get('slope', 0)
                    if abs(slope) > self.thresholds['trend_threshold'] * 10:
                        direction = 'rapid increase' if slope > 0 else 'rapid decrease'
                        alerts.append({
                            'type': 'trend_change',
                            'severity': 'info',
                            'message': f'列 {column} 出现 {direction}',
                            'timestamp': datetime.now().isoformat(),
                            'details': {'column': column, 'slope': slope, 'direction': direction}
                        })

        except Exception as e:
            self.logger.error(f"警报检测失败: {e}")

        return alerts

    def _update_metrics(self, data: Dict[str, Any]):
        """Update per-field value histories and the data arrival rate.

        Args:
            data: Data record just appended to the window.
        """
        try:
            # Record arrival time (epoch seconds) for rate computation.
            current_time = datetime.now()
            self.metrics['data_arrival_time'].append(current_time.timestamp())

            # Track each numeric field's values (NaNs are skipped).
            for key, value in data.items():
                if isinstance(value, (int, float)) and not np.isnan(value):
                    self.metrics[f'{key}_values'].append(value)

            # Arrival rate (records/sec) over the last up-to-10 intervals.
            if len(self.metrics['data_arrival_time']) > 1:
                recent_times = list(self.metrics['data_arrival_time'])[-10:]
                if len(recent_times) > 1:
                    time_diffs = [recent_times[i] - recent_times[i-1] for i in range(1, len(recent_times))]
                    avg_interval = np.mean(time_diffs)
                    self.metrics['data_rate'].append(1 / avg_interval if avg_interval > 0 else 0)

        except Exception as e:
            self.logger.error(f"更新指标失败: {e}")

    def get_current_analysis(self) -> Dict[str, Any]:
        """Return a shallow copy of the latest analysis results.

        Returns:
            Latest analysis-results dict ({} before the first pass).
        """
        return self.analysis_cache.copy()

    def get_recent_alerts(self, count: int = 10) -> List[Dict[str, Any]]:
        """Return the most recent alerts, newest last.

        Args:
            count: Maximum number of alerts to return.

        Returns:
            List of at most `count` recent alert dicts.
        """
        return list(self.alerts)[-count:] if self.alerts else []

    def get_metrics_summary(self) -> Dict[str, Any]:
        """Summarize every tracked metric's history.

        Returns:
            Mapping of metric name to current/average/min/max/count.
        """
        summary = {}

        for metric_name, values in self.metrics.items():
            if values and len(values) > 0:
                summary[metric_name] = {
                    'current': values[-1],
                    # FIX: cast numpy reductions to float — np.min/np.max over
                    # int values yield np.int64, which JSON cannot serialize.
                    'average': float(np.mean(values)),
                    'min': float(np.min(values)),
                    'max': float(np.max(values)),
                    'count': len(values)
                }

        return summary

    def set_threshold(self, threshold_name: str, value: float):
        """Update a known threshold; unknown names are logged and ignored.

        Args:
            threshold_name: Key in self.thresholds.
            value: New threshold value.
        """
        if threshold_name in self.thresholds:
            self.thresholds[threshold_name] = value
            self.logger.info(f"阈值 {threshold_name} 已设置为 {value}")
        else:
            self.logger.warning(f"未知的阈值名称: {threshold_name}")

    def export_analysis_history(self, filepath: str):
        """Export current analysis state, alerts and metrics to a JSON file.

        Args:
            filepath: Destination file path.
        """
        def _json_default(obj):
            # FIX: numpy scalars/arrays leak into the analysis results and
            # json.dump raised TypeError on them; convert to native types,
            # falling back to str() for anything else non-serializable.
            if isinstance(obj, np.integer):
                return int(obj)
            if isinstance(obj, np.floating):
                return float(obj)
            if isinstance(obj, np.bool_):
                return bool(obj)
            if isinstance(obj, np.ndarray):
                return obj.tolist()
            return str(obj)

        try:
            history_data = {
                'export_time': datetime.now().isoformat(),
                'current_analysis': self.analysis_cache,
                'recent_alerts': list(self.alerts),
                'metrics_summary': self.get_metrics_summary(),
                'thresholds': self.thresholds,
                'window_size': self.window_size,
                'update_interval': self.update_interval
            }

            with open(filepath, 'w', encoding='utf-8') as f:
                json.dump(history_data, f, ensure_ascii=False, indent=2, default=_json_default)

            self.logger.info(f"分析历史已导出到: {filepath}")

        except Exception as e:
            self.logger.error(f"导出分析历史失败: {e}")