"""数据流处理器

处理实时数据流，支持多种数据源和处理模式。
"""

import logging
import threading
import queue
import time
import json
from typing import Dict, List, Any, Optional, Callable
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
from collections import deque, defaultdict

class StreamProcessor:
    """Threaded stream processor for dict records.

    Records are buffered in a bounded deque; once ``batch_size`` records have
    accumulated, a batch is moved onto an internal queue and consumed by a
    daemon thread.  Each batch is run through the registered processors,
    summarised statistically, scanned for IQR outliers, and the combined
    result is handed to the registered callbacks.
    """

    def __init__(self, buffer_size: int = 1000, batch_size: int = 100):
        """Initialise the stream processor.

        Args:
            buffer_size: Capacity of the input buffer.  Because the buffer is
                a ``deque(maxlen=...)``, the oldest record is silently dropped
                once the buffer is full.
            batch_size: Buffered-record count that triggers a batch.  NOTE:
                if ``batch_size > buffer_size`` the trigger in ``add_data``
                can never fire, so data would sit in the buffer indefinitely.
        """
        self.logger = logging.getLogger(__name__)
        self.buffer_size = buffer_size
        self.batch_size = batch_size

        # Input buffer and the queue feeding the background thread.
        self.data_buffer = deque(maxlen=buffer_size)
        self.processing_queue = queue.Queue()

        # Background-thread lifecycle state.
        self.is_running = False
        self.processing_thread = None

        # Counters exposed through get_stats().
        self.stats = {
            'total_processed': 0,
            'processing_rate': 0,
            'last_update': datetime.now(),
            'errors': 0,
            'buffer_usage': 0
        }

        # User-registered batch processors and result callbacks.
        self.processors = []
        self.callbacks = []

        # Rolling per-metric history (bounded to the last 100 samples each).
        self.window_stats = defaultdict(lambda: deque(maxlen=100))

    def add_processor(self, processor: Callable[[List[Dict]], Any]) -> None:
        """Register a batch processor.

        Args:
            processor: Callable that receives the list of records in a batch
                and returns an arbitrary result object.
        """
        self.processors.append(processor)
        # getattr: not every callable (e.g. functools.partial) has __name__.
        self.logger.info(f"添加处理器: {getattr(processor, '__name__', repr(processor))}")

    def add_callback(self, callback: Callable[[Any], None]) -> None:
        """Register a result callback.

        Args:
            callback: Callable invoked with each batch's result dict.
        """
        self.callbacks.append(callback)
        self.logger.info(f"添加回调函数: {getattr(callback, '__name__', repr(callback))}")

    def start(self) -> None:
        """Start the background processing thread (no-op if already running)."""
        if self.is_running:
            self.logger.warning("流处理器已在运行")
            return

        self.is_running = True
        # Daemon thread so an un-stopped processor never blocks interpreter exit.
        self.processing_thread = threading.Thread(target=self._processing_loop)
        self.processing_thread.daemon = True
        self.processing_thread.start()

        self.logger.info("流处理器已启动")

    def stop(self) -> None:
        """Stop the processing loop and wait briefly for the thread to exit."""
        self.is_running = False

        # Joining the current thread raises RuntimeError; that would happen
        # if stop() were called from a callback running on the worker thread.
        if self.processing_thread and self.processing_thread is not threading.current_thread():
            self.processing_thread.join(timeout=5)

        self.logger.info("流处理器已停止")

    def add_data(self, data: Dict[str, Any]) -> None:
        """Append one record to the stream.

        A ``timestamp`` field (ISO-8601 string) is added if missing.  When
        the buffer reaches ``batch_size`` records, a batch is queued for the
        background thread.

        Args:
            data: Record to ingest; mutated in place to add the timestamp.
        """
        try:
            if 'timestamp' not in data:
                data['timestamp'] = datetime.now().isoformat()

            self.data_buffer.append(data)

            # Buffer usage as a percentage of capacity.
            self.stats['buffer_usage'] = len(self.data_buffer) / self.buffer_size * 100

            if len(self.data_buffer) >= self.batch_size:
                self._trigger_batch_processing()

        except Exception as e:
            self.logger.error(f"添加数据失败: {e}")
            self.stats['errors'] += 1

    def add_batch_data(self, data_list: List[Dict[str, Any]]) -> None:
        """Append multiple records (delegates to add_data record by record).

        Args:
            data_list: Records to ingest.
        """
        for data in data_list:
            self.add_data(data)

    def _trigger_batch_processing(self) -> None:
        """Move up to ``batch_size`` records from the buffer onto the queue."""
        try:
            batch_data = []
            for _ in range(min(self.batch_size, len(self.data_buffer))):
                if self.data_buffer:
                    batch_data.append(self.data_buffer.popleft())

            if batch_data:
                self.processing_queue.put(batch_data)

        except Exception as e:
            self.logger.error(f"触发批处理失败: {e}")

    def _processing_loop(self) -> None:
        """Worker loop: consume batches, process them, notify callbacks."""
        while self.is_running:
            try:
                # Short timeout so the loop re-checks is_running regularly.
                try:
                    batch_data = self.processing_queue.get(timeout=1)
                except queue.Empty:
                    continue

                try:
                    start_time = time.time()
                    results = self._process_batch(batch_data)
                    processing_time = time.time() - start_time

                    self.stats['total_processed'] += len(batch_data)
                    self.stats['processing_rate'] = len(batch_data) / processing_time if processing_time > 0 else 0
                    self.stats['last_update'] = datetime.now()

                    # Callbacks are isolated: one failure must not stop the rest.
                    for callback in self.callbacks:
                        try:
                            callback(results)
                        except Exception as e:
                            self.logger.error(f"回调函数执行失败: {e}")
                finally:
                    # Always balance the get() so a Queue.join() cannot hang.
                    self.processing_queue.task_done()

            except Exception as e:
                self.logger.error(f"处理循环错误: {e}")
                self.stats['errors'] += 1
                time.sleep(0.1)

    def _process_batch(self, batch_data: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Process one batch of records.

        Args:
            batch_data: Records taken from the buffer.

        Returns:
            Result dict with batch size, per-column statistics, custom
            processor outputs, anomaly alerts, and (on failure) an
            ``'error'`` key.
        """
        results = {
            'batch_size': len(batch_data),
            'timestamp': datetime.now().isoformat(),
            'processed_data': [],
            'statistics': {},
            'alerts': []
        }

        try:
            df = pd.DataFrame(batch_data)

            # Summary statistics over numeric columns only.
            numeric_columns = df.select_dtypes(include=[np.number]).columns
            if len(numeric_columns) > 0:
                results['statistics'] = {
                    'mean': df[numeric_columns].mean().to_dict(),
                    'std': df[numeric_columns].std().to_dict(),
                    'min': df[numeric_columns].min().to_dict(),
                    'max': df[numeric_columns].max().to_dict(),
                    'count': len(df)
                }

            # Custom processors run independently of each other.
            for processor in self.processors:
                name = getattr(processor, '__name__', repr(processor))
                try:
                    processor_result = processor(batch_data)
                    results['processed_data'].append({
                        'processor': name,
                        'result': processor_result
                    })
                except Exception as e:
                    self.logger.error(f"处理器 {name} 执行失败: {e}")

            results['alerts'].extend(self._detect_anomalies(df))

            self._update_window_stats(df)

        except Exception as e:
            self.logger.error(f"批处理失败: {e}")
            results['error'] = str(e)

        return results

    def _detect_anomalies(self, df: pd.DataFrame) -> List[Dict[str, Any]]:
        """Flag IQR outliers per numeric column.

        Args:
            df: Batch as a DataFrame.

        Returns:
            One alert dict per column that contains outliers; empty when clean.
        """
        alerts = []

        try:
            numeric_columns = df.select_dtypes(include=[np.number]).columns

            for column in numeric_columns:
                series = df[column].dropna()
                if len(series) == 0:
                    continue

                # Tukey's fences: 1.5 * IQR beyond the quartiles.
                Q1 = series.quantile(0.25)
                Q3 = series.quantile(0.75)
                IQR = Q3 - Q1

                lower_bound = Q1 - 1.5 * IQR
                upper_bound = Q3 + 1.5 * IQR

                outliers = series[(series < lower_bound) | (series > upper_bound)]

                if len(outliers) > 0:
                    alerts.append({
                        'type': 'anomaly',
                        'column': column,
                        'outlier_count': len(outliers),
                        'outlier_percentage': len(outliers) / len(series) * 100,
                        'outlier_values': outliers.tolist(),
                        # Plain floats keep the alert JSON-serializable.
                        'bounds': {'lower': float(lower_bound), 'upper': float(upper_bound)},
                        'timestamp': datetime.now().isoformat()
                    })

        except Exception as e:
            self.logger.error(f"异常检测失败: {e}")

        return alerts

    def _update_window_stats(self, df: pd.DataFrame) -> None:
        """Append each numeric column's mean/std/count to the rolling windows.

        Args:
            df: Batch as a DataFrame.
        """
        try:
            numeric_columns = df.select_dtypes(include=[np.number]).columns

            for column in numeric_columns:
                series = df[column].dropna()
                if len(series) > 0:
                    # float()/int() keep the windows free of numpy scalar
                    # types, so they stay JSON-serializable in export_stats().
                    self.window_stats[f'{column}_mean'].append(float(series.mean()))
                    self.window_stats[f'{column}_std'].append(float(series.std()))
                    self.window_stats[f'{column}_count'].append(int(len(series)))

        except Exception as e:
            self.logger.error(f"更新窗口统计失败: {e}")

    def get_stats(self) -> Dict[str, Any]:
        """Return a snapshot of processing statistics.

        Returns:
            Copy of the counters plus current buffer/queue sizes, run state,
            and a summary of the rolling windows.  ``last_update`` remains a
            ``datetime`` object for in-process callers.
        """
        current_stats = self.stats.copy()
        current_stats['buffer_size'] = len(self.data_buffer)
        current_stats['queue_size'] = self.processing_queue.qsize()
        current_stats['is_running'] = self.is_running

        # Latest value plus a naive two-point trend per rolling metric.
        window_summary = {}
        for key, values in self.window_stats.items():
            if values:
                window_summary[key] = {
                    'current': values[-1],
                    'trend': 'increasing' if len(values) > 1 and values[-1] > values[-2] else 'stable',
                    'history_length': len(values)
                }

        current_stats['window_stats'] = window_summary

        return current_stats

    def get_recent_data(self, count: int = 10) -> List[Dict[str, Any]]:
        """Return up to ``count`` most recent buffered records.

        Args:
            count: Maximum number of records to return.

        Returns:
            The newest records, oldest first; empty list if the buffer is empty.
        """
        return list(self.data_buffer)[-count:] if self.data_buffer else []

    def clear_buffer(self) -> None:
        """Discard all buffered (not yet queued) records."""
        self.data_buffer.clear()
        self.logger.info("缓冲区已清空")

    def export_stats(self, filepath: str) -> None:
        """Write current statistics to ``filepath`` as UTF-8 JSON.

        Args:
            filepath: Destination file path.
        """
        try:
            # get_stats() returns 'last_update' as a datetime for in-process
            # callers; convert it here so json.dump does not raise.
            processor_stats = self.get_stats()
            last_update = processor_stats.get('last_update')
            if isinstance(last_update, datetime):
                processor_stats['last_update'] = last_update.isoformat()

            stats_data = {
                'export_time': datetime.now().isoformat(),
                'processor_stats': processor_stats,
                # deques are not JSON-serializable; snapshot them as lists.
                'window_stats': {key: list(values) for key, values in self.window_stats.items()}
            }

            with open(filepath, 'w', encoding='utf-8') as f:
                # default=float catches any stray numpy scalars in the snapshot.
                json.dump(stats_data, f, ensure_ascii=False, indent=2, default=float)

            self.logger.info(f"统计信息已导出到: {filepath}")

        except Exception as e:
            self.logger.error(f"导出统计信息失败: {e}")

# 示例处理器函数
def sentiment_processor(data_list: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Keyword-based sentiment scorer for a batch of records (example).

    Each record's ``text`` field is lower-cased and scanned for hard-coded
    positive/negative keywords; a record is labelled by whichever set matches
    more often, with a smoothed score in (-1, 1).

    Args:
        data_list: Records; each may carry a ``text`` field (a missing field
            is treated as an empty string, yielding a neutral score).

    Returns:
        Dict with the average score (0.0 for an empty batch), the label
        distribution, per-record scores, and the number of records processed.
    """
    positive_keywords = ['好', '棒', '优秀', '满意', '喜欢', 'good', 'great', 'excellent']
    negative_keywords = ['差', '糟糕', '不满', '讨厌', 'bad', 'terrible', 'awful']

    sentiment_scores = []

    for data in data_list:
        text = str(data.get('text', '')).lower()

        positive_count = sum(1 for word in positive_keywords if word in text)
        negative_count = sum(1 for word in negative_keywords if word in text)

        # +1 in the denominator smooths the score and avoids division by zero.
        if positive_count > negative_count:
            sentiment = 'positive'
            score = positive_count / (positive_count + negative_count + 1)
        elif negative_count > positive_count:
            sentiment = 'negative'
            score = -negative_count / (positive_count + negative_count + 1)
        else:
            sentiment = 'neutral'
            score = 0

        sentiment_scores.append({
            'sentiment': sentiment,
            'score': score,
            'positive_count': positive_count,
            'negative_count': negative_count
        })

    # Guard the empty batch: np.mean([]) returns NaN with a RuntimeWarning.
    if sentiment_scores:
        avg_score = float(np.mean([s['score'] for s in sentiment_scores]))
    else:
        avg_score = 0.0

    sentiment_distribution = {
        'positive': sum(1 for s in sentiment_scores if s['sentiment'] == 'positive'),
        'negative': sum(1 for s in sentiment_scores if s['sentiment'] == 'negative'),
        'neutral': sum(1 for s in sentiment_scores if s['sentiment'] == 'neutral')
    }

    return {
        'average_sentiment_score': avg_score,
        'sentiment_distribution': sentiment_distribution,
        'individual_scores': sentiment_scores,
        'total_processed': len(data_list)
    }

def statistical_processor(data_list: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Descriptive-statistics processor for a batch of records (example).

    Builds a DataFrame from the records and summarises every numeric column
    (mean/median/std/min/max, non-null count, missing count).

    Args:
        data_list: Records to summarise.

    Returns:
        Dict with per-column statistics, the total row count and the number
        of numeric columns, or ``{'error': ...}`` when there is no numeric
        data or the computation fails.
    """
    try:
        frame = pd.DataFrame(data_list)
        numeric_cols = frame.select_dtypes(include=[np.number]).columns

        if len(numeric_cols) == 0:
            return {'error': '没有数值型数据'}

        column_stats = {}
        for col in numeric_cols:
            values = frame[col].dropna()
            if values.empty:
                # Column is entirely null — nothing meaningful to report.
                continue
            column_stats[col] = {
                'mean': float(values.mean()),
                'median': float(values.median()),
                'std': float(values.std()),
                'min': float(values.min()),
                'max': float(values.max()),
                'count': int(len(values)),
                'missing': int(frame[col].isnull().sum())
            }

        return {
            'column_statistics': column_stats,
            'total_rows': len(frame),
            'numeric_columns': len(numeric_cols)
        }

    except Exception as e:
        # Best-effort contract: report failures as data, never raise.
        return {'error': str(e)}