"""
RedFire 实时数据处理引擎
======================

提供高性能的实时数据处理功能，包括：
- 数据清洗和验证
- 技术指标计算
- 异常检测
- 数据质量监控
"""

from typing import Dict, List, Optional, Any, Tuple
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from enum import Enum
import asyncio
import logging
import numpy as np
from collections import deque
import statistics

from .models import MarketDataTick, MarketDataBar


class DataQuality(Enum):
    """Data quality grade, ordered from best (EXCELLENT) to worst (BAD).

    RealtimeDataEngine maps an overall 0-1 quality score onto these
    grades using its ``quality_thresholds`` configuration.
    """
    EXCELLENT = "excellent"
    GOOD = "good"
    FAIR = "fair"
    POOR = "poor"
    BAD = "bad"


@dataclass
class TechnicalIndicators:
    """Snapshot of computed technical indicators for one symbol."""
    symbol: str
    timestamp: datetime

    # Simple moving averages of the close price.
    ma5: Optional[float] = None
    ma10: Optional[float] = None
    ma20: Optional[float] = None
    ma50: Optional[float] = None

    # Bollinger bands (upper / middle / lower).
    bb_upper: Optional[float] = None
    bb_middle: Optional[float] = None
    bb_lower: Optional[float] = None

    # Relative Strength Index.
    rsi: Optional[float] = None

    # MACD line, signal line and histogram.
    macd: Optional[float] = None
    macd_signal: Optional[float] = None
    macd_histogram: Optional[float] = None

    # Volume moving averages.
    volume_ma5: Optional[float] = None
    volume_ma10: Optional[float] = None

    def to_dict(self) -> Dict[str, Any]:
        """Return the indicator values as a plain dictionary.

        The timestamp is serialized to ISO-8601; indicator fields keep
        their raw values (``None`` when not yet computed).
        """
        payload: Dict[str, Any] = {
            "symbol": self.symbol,
            "timestamp": self.timestamp.isoformat(),
        }
        # Copy the indicator fields in declaration order.
        for name in (
            "ma5", "ma10", "ma20", "ma50",
            "bb_upper", "bb_middle", "bb_lower",
            "rsi",
            "macd", "macd_signal", "macd_histogram",
            "volume_ma5", "volume_ma10",
        ):
            payload[name] = getattr(self, name)
        return payload


@dataclass
class DataQualityMetrics:
    """Per-symbol data-quality measurements plus an overall grade."""
    symbol: str
    timestamp: datetime

    # Completeness: fraction of cached records with all required fields.
    completeness_score: float = 0.0  # 0-1
    missing_data_count: int = 0

    # Accuracy: fraction of records without abnormal price jumps.
    accuracy_score: float = 0.0  # 0-1
    outlier_count: int = 0

    # Timeliness: how fresh the cached data is.
    timeliness_score: float = 0.0  # 0-1
    avg_delay_ms: float = 0.0

    # Consistency: fraction of records sharing the expected schema.
    consistency_score: float = 0.0  # 0-1
    inconsistency_count: int = 0

    # Aggregate grade derived from the four scores above.
    overall_quality: DataQuality = DataQuality.FAIR

    def to_dict(self) -> Dict[str, Any]:
        """Return the metrics as a plain dictionary.

        The timestamp is serialized to ISO-8601 and the quality grade is
        flattened to its string value.
        """
        payload: Dict[str, Any] = {
            "symbol": self.symbol,
            "timestamp": self.timestamp.isoformat(),
        }
        # Copy the numeric metrics in declaration order.
        for name in (
            "completeness_score", "missing_data_count",
            "accuracy_score", "outlier_count",
            "timeliness_score", "avg_delay_ms",
            "consistency_score", "inconsistency_count",
        ):
            payload[name] = getattr(self, name)
        payload["overall_quality"] = self.overall_quality.value
        return payload


class RealtimeDataEngine:
    """
    Realtime data processing engine.

    Provides high-performance realtime data processing: data cleaning,
    technical-indicator calculation, anomaly detection and data-quality
    monitoring.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """
        Initialize the data processing engine.

        Args:
            config: Optional configuration overrides; missing keys fall
                back to the defaults set below.
        """
        self.config = config or {}
        self.logger = logging.getLogger("redfire.data_engine")

        # Per-symbol rolling caches of the most recent records
        # (bounded deques; oldest entries drop off automatically).
        self._tick_cache: Dict[str, deque] = {}  # symbol -> deque of ticks
        self._bar_cache: Dict[str, deque] = {}   # symbol -> deque of bars

        # Indicator parameters.
        self.max_cache_size = self.config.get("max_cache_size", 1000)
        self.ma_periods = self.config.get("ma_periods", [5, 10, 20, 50])
        self.bb_period = self.config.get("bb_period", 20)
        self.bb_std = self.config.get("bb_std", 2.0)
        self.rsi_period = self.config.get("rsi_period", 14)
        self.macd_fast = self.config.get("macd_fast", 12)
        self.macd_slow = self.config.get("macd_slow", 26)
        self.macd_signal = self.config.get("macd_signal", 9)

        # Minimum overall score required for each quality grade;
        # _check_data_quality tests these in descending order.
        self.quality_thresholds = {
            "excellent": 0.9,
            "good": 0.8,
            "fair": 0.6,
            "poor": 0.4
        }

        # Anomaly-detection parameters.
        self.outlier_threshold = self.config.get("outlier_threshold", 3.0)  # z-score cutoff, in std devs
        self.price_change_threshold = self.config.get("price_change_threshold", 0.1)  # 10% step change

        # Processing counters exposed via get_stats().
        self._stats = {
            "ticks_processed": 0,
            "bars_processed": 0,
            "indicators_calculated": 0,
            "anomalies_detected": 0,
            "quality_checks": 0
        }

    async def process_tick_data(self, tick: MarketDataTick) -> Dict[str, Any]:
        """
        Process one tick: clean, cache, compute indicators, score data
        quality and detect anomalies.

        Args:
            tick: Incoming tick data.

        Returns:
            Dict with keys "cleaned_tick", "indicators", "quality" and
            "anomalies". On rejection or failure the first three are None
            and "anomalies" is an empty list, so the shape is stable.
        """
        # Stable payload for the rejection/failure paths. Previously
        # these paths omitted the "anomalies" key that the success path
        # includes, giving callers an inconsistent result shape.
        failure: Dict[str, Any] = {
            "cleaned_tick": None,
            "indicators": None,
            "quality": None,
            "anomalies": []
        }
        try:
            # Data cleaning: drop invalid/implausible ticks.
            cleaned_tick = await self._clean_tick_data(tick)
            if not cleaned_tick:
                return failure

            # Cache update feeds the indicator/quality calculations.
            await self._update_tick_cache(cleaned_tick)

            indicators = await self._calculate_technical_indicators(cleaned_tick.symbol)
            quality = await self._check_data_quality(cleaned_tick.symbol, "tick")
            anomalies = await self._detect_anomalies(cleaned_tick)

            self._stats["ticks_processed"] += 1

            return {
                "cleaned_tick": cleaned_tick,
                "indicators": indicators,
                "quality": quality,
                "anomalies": anomalies
            }

        except Exception as e:
            self.logger.error(f"Tick数据处理失败: {e}")
            return failure

    async def process_bar_data(self, bar: MarketDataBar) -> Dict[str, Any]:
        """
        Process one bar (candlestick): clean, cache, compute indicators
        and score data quality.

        Args:
            bar: Incoming bar data.

        Returns:
            Dict with keys "cleaned_bar", "indicators" and "quality";
            all None when the bar is rejected or processing fails.
        """
        try:
            # Data cleaning: drop invalid/implausible bars.
            cleaned_bar = await self._clean_bar_data(bar)
            if not cleaned_bar:
                return {"cleaned_bar": None, "indicators": None, "quality": None}

            await self._update_bar_cache(cleaned_bar)

            indicators = await self._calculate_technical_indicators(cleaned_bar.symbol)
            quality = await self._check_data_quality(cleaned_bar.symbol, "bar")

            self._stats["bars_processed"] += 1

            return {
                "cleaned_bar": cleaned_bar,
                "indicators": indicators,
                "quality": quality
            }

        except Exception as e:
            self.logger.error(f"K线数据处理失败: {e}")
            return {"cleaned_bar": None, "indicators": None, "quality": None}

    async def _clean_tick_data(self, tick: MarketDataTick) -> Optional[MarketDataTick]:
        """Validate a tick; return it unchanged, or None to reject it."""
        try:
            # Required fields and a strictly positive price.
            if not tick.symbol or not tick.price or tick.price <= 0:
                return None

            # Sanity bounds on the absolute price level.
            if tick.price > 1000000 or tick.price < 0.01:
                return None

            # Reject timestamps more than 5 minutes in the future.
            # NOTE(review): assumes tick.timestamp is naive and on the
            # same clock as datetime.now() — confirm against the feed.
            if tick.timestamp > datetime.now() + timedelta(minutes=5):
                return None

            return tick

        except Exception as e:
            self.logger.error(f"Tick数据清洗失败: {e}")
            return None

    async def _clean_bar_data(self, bar: MarketDataBar) -> Optional[MarketDataBar]:
        """Validate a bar; return it unchanged, or None to reject it."""
        try:
            # Required fields and a strictly positive close.
            if not bar.symbol or not bar.close or bar.close <= 0:
                return None

            # OHLC invariant: high must bound open/close from above,
            # low must bound them from below.
            if bar.high < max(bar.open, bar.close) or bar.low > min(bar.open, bar.close):
                return None

            # Sanity bounds on the absolute price level.
            if bar.close > 1000000 or bar.close < 0.01:
                return None

            return bar

        except Exception as e:
            self.logger.error(f"K线数据清洗失败: {e}")
            return None

    async def _update_tick_cache(self, tick: MarketDataTick) -> None:
        """Append a cleaned tick to its symbol's bounded cache."""
        symbol = tick.symbol
        if symbol not in self._tick_cache:
            self._tick_cache[symbol] = deque(maxlen=self.max_cache_size)

        self._tick_cache[symbol].append(tick)

    async def _update_bar_cache(self, bar: MarketDataBar) -> None:
        """Append a cleaned bar to its symbol's bounded cache."""
        symbol = bar.symbol
        if symbol not in self._bar_cache:
            self._bar_cache[symbol] = deque(maxlen=self.max_cache_size)

        self._bar_cache[symbol].append(bar)

    async def _calculate_technical_indicators(self, symbol: str) -> Optional[TechnicalIndicators]:
        """Compute indicators from the symbol's cached bars.

        Returns None until the cache holds at least max(ma_periods)
        bars (50 with the default configuration).
        """
        try:
            # Not enough bar history yet.
            if symbol not in self._bar_cache or len(self._bar_cache[symbol]) < max(self.ma_periods):
                return None

            bars = list(self._bar_cache[symbol])
            closes = [bar.close for bar in bars]
            volumes = [bar.volume for bar in bars]

            # Simple moving averages over each configured period.
            ma_values = {}
            for period in self.ma_periods:
                if len(closes) >= period:
                    ma_values[f"ma{period}"] = np.mean(closes[-period:])

            # Bollinger bands: middle is an SMA, the bands sit bb_std
            # standard deviations above/below it.
            bb_upper, bb_middle, bb_lower = None, None, None
            if len(closes) >= self.bb_period:
                bb_middle = np.mean(closes[-self.bb_period:])
                bb_std = np.std(closes[-self.bb_period:])
                bb_upper = bb_middle + self.bb_std * bb_std
                bb_lower = bb_middle - self.bb_std * bb_std

            # RSI needs period+1 closes (period deltas).
            rsi = None
            if len(closes) >= self.rsi_period + 1:
                rsi = self._calculate_rsi(closes, self.rsi_period)

            # MACD needs at least macd_slow closes.
            macd, macd_signal, macd_histogram = None, None, None
            if len(closes) >= self.macd_slow:
                macd, macd_signal, macd_histogram = self._calculate_macd(closes)

            # Volume moving averages.
            volume_ma5 = None
            volume_ma10 = None
            if len(volumes) >= 5:
                volume_ma5 = np.mean(volumes[-5:])
            if len(volumes) >= 10:
                volume_ma10 = np.mean(volumes[-10:])

            indicators = TechnicalIndicators(
                symbol=symbol,
                timestamp=datetime.now(),
                ma5=ma_values.get("ma5"),
                ma10=ma_values.get("ma10"),
                ma20=ma_values.get("ma20"),
                ma50=ma_values.get("ma50"),
                bb_upper=bb_upper,
                bb_middle=bb_middle,
                bb_lower=bb_lower,
                rsi=rsi,
                macd=macd,
                macd_signal=macd_signal,
                macd_histogram=macd_histogram,
                volume_ma5=volume_ma5,
                volume_ma10=volume_ma10
            )

            self._stats["indicators_calculated"] += 1
            return indicators

        except Exception as e:
            self.logger.error(f"技术指标计算失败: {e}")
            return None

    def _calculate_rsi(self, closes: List[float], period: int) -> float:
        """Compute the RSI over the last `period` price deltas.

        Returns 0.0 when there is not enough history, and 100.0 when
        there were no losses in the window.
        """
        if len(closes) < period + 1:
            return 0.0

        deltas = [closes[i] - closes[i-1] for i in range(1, len(closes))]
        gains = [d if d > 0 else 0 for d in deltas]
        losses = [-d if d < 0 else 0 for d in deltas]

        avg_gain = np.mean(gains[-period:])
        avg_loss = np.mean(losses[-period:])

        # All-gain window: RS is infinite, RSI saturates at 100.
        if avg_loss == 0:
            return 100.0

        rs = avg_gain / avg_loss
        rsi = 100 - (100 / (1 + rs))
        return rsi

    def _calculate_macd(self, closes: List[float]) -> Tuple[Optional[float], Optional[float], Optional[float]]:
        """Compute (macd, signal, histogram) from close prices.

        Returns (None, None, None) when there is not enough history.
        (Annotation fixed: the previous Tuple[float, float, float] did
        not admit the None early-return.)
        """
        if len(closes) < self.macd_slow:
            return None, None, None

        def ema(data, period):
            # Standard recursive EMA seeded with the first sample.
            alpha = 2 / (period + 1)
            ema_values = [data[0]]
            for i in range(1, len(data)):
                ema_values.append(alpha * data[i] + (1 - alpha) * ema_values[-1])
            return ema_values

        ema_fast = ema(closes, self.macd_fast)
        ema_slow = ema(closes, self.macd_slow)

        macd_line = [ema_fast[i] - ema_slow[i] for i in range(len(ema_fast))]
        macd_signal_line = ema(macd_line, self.macd_signal)

        macd = macd_line[-1] if macd_line else None
        macd_signal = macd_signal_line[-1] if macd_signal_line else None

        # Use explicit None checks: a legitimate MACD value of exactly
        # 0.0 is falsy, and the previous `if macd and macd_signal`
        # produced a spurious None histogram in that case.
        if macd is not None and macd_signal is not None:
            macd_histogram = macd - macd_signal
        else:
            macd_histogram = None

        return macd, macd_signal, macd_histogram

    async def _check_data_quality(self, symbol: str, data_type: str) -> DataQualityMetrics:
        """Score the symbol's cached data and grade its overall quality.

        Args:
            symbol: Symbol to score.
            data_type: "tick" to score the tick cache; anything else
                scores the bar cache.

        Returns:
            DataQualityMetrics; default-valued metrics when the cache is
            empty or the check fails. The count/delay fields
            (missing_data_count, outlier_count, avg_delay_ms,
            inconsistency_count) are currently left at their defaults —
            TODO: populate them from the individual score calculations.
        """
        try:
            # Select the cache to score.
            if data_type == "tick":
                data_cache = self._tick_cache.get(symbol, deque())
            else:
                data_cache = self._bar_cache.get(symbol, deque())

            if not data_cache:
                return DataQualityMetrics(symbol=symbol, timestamp=datetime.now())

            # Individual quality dimensions, each in [0, 1].
            completeness_score = self._calculate_completeness(data_cache)
            accuracy_score = self._calculate_accuracy(data_cache)
            timeliness_score = self._calculate_timeliness(data_cache)
            consistency_score = self._calculate_consistency(data_cache)

            # Overall score: unweighted mean of the four dimensions.
            overall_score = (completeness_score + accuracy_score +
                           timeliness_score + consistency_score) / 4

            # Map the score onto a grade, best threshold first.
            if overall_score >= self.quality_thresholds["excellent"]:
                overall_quality = DataQuality.EXCELLENT
            elif overall_score >= self.quality_thresholds["good"]:
                overall_quality = DataQuality.GOOD
            elif overall_score >= self.quality_thresholds["fair"]:
                overall_quality = DataQuality.FAIR
            elif overall_score >= self.quality_thresholds["poor"]:
                overall_quality = DataQuality.POOR
            else:
                overall_quality = DataQuality.BAD

            quality = DataQualityMetrics(
                symbol=symbol,
                timestamp=datetime.now(),
                completeness_score=completeness_score,
                accuracy_score=accuracy_score,
                timeliness_score=timeliness_score,
                consistency_score=consistency_score,
                overall_quality=overall_quality
            )

            self._stats["quality_checks"] += 1
            return quality

        except Exception as e:
            self.logger.error(f"数据质量检查失败: {e}")
            return DataQualityMetrics(symbol=symbol, timestamp=datetime.now())

    def _calculate_completeness(self, data_cache: deque) -> float:
        """Fraction of cached records carrying the required fields.

        NOTE(review): checks for a `price` attribute, which bar records
        may not have (they expose `close`) — confirm the bar model.
        """
        if not data_cache:
            return 0.0

        complete_count = 0
        for item in data_cache:
            if hasattr(item, 'symbol') and hasattr(item, 'price') and hasattr(item, 'timestamp'):
                complete_count += 1

        return complete_count / len(data_cache) if data_cache else 0.0

    def _calculate_accuracy(self, data_cache: deque) -> float:
        """Score accuracy as the fraction of tick-to-tick price moves
        below price_change_threshold; 1.0 when too few prices exist."""
        if len(data_cache) < 2:
            return 1.0

        # Collect prices from records that expose a price attribute.
        prices = []
        for item in data_cache:
            if hasattr(item, 'price'):
                prices.append(item.price)

        if len(prices) < 2:
            return 1.0

        # Relative step change between consecutive prices. Division is
        # safe: cleaned ticks guarantee price >= 0.01.
        price_changes = []
        for i in range(1, len(prices)):
            change_rate = abs(prices[i] - prices[i-1]) / prices[i-1]
            price_changes.append(change_rate)

        # Penalize the share of moves exceeding the threshold.
        outlier_count = sum(1 for change in price_changes if change > self.price_change_threshold)
        accuracy_score = 1.0 - (outlier_count / len(price_changes))

        return max(0.0, accuracy_score)

    def _calculate_timeliness(self, data_cache: deque) -> float:
        """Score freshness from the average age of cached records.

        <=1s average delay scores 1.0, >=10s scores 0.0, linear between.
        """
        if not data_cache:
            return 0.0

        # Age of each record relative to now.
        # NOTE(review): assumes naive timestamps on the local clock.
        now = datetime.now()
        delays = []

        for item in data_cache:
            if hasattr(item, 'timestamp'):
                delay = (now - item.timestamp).total_seconds()
                delays.append(delay)

        if not delays:
            return 0.0

        avg_delay = np.mean(delays)

        # Piecewise-linear mapping of average delay to [0, 1].
        if avg_delay <= 1.0:
            return 1.0
        elif avg_delay >= 10.0:
            return 0.0
        else:
            return 1.0 - (avg_delay - 1.0) / 9.0

    def _calculate_consistency(self, data_cache: deque) -> float:
        """Fraction of cached records sharing the full expected schema
        (symbol, price, timestamp, volume)."""
        if len(data_cache) < 2:
            return 1.0

        consistent_count = 0
        for item in data_cache:
            if (hasattr(item, 'symbol') and hasattr(item, 'price') and
                hasattr(item, 'timestamp') and hasattr(item, 'volume')):
                consistent_count += 1

        return consistent_count / len(data_cache) if data_cache else 0.0

    async def _detect_anomalies(self, tick: MarketDataTick) -> List[Dict[str, Any]]:
        """Detect price/volume outliers for a tick via z-scores against
        the last 10 cached ticks.

        Returns:
            List of anomaly dicts (possibly empty); detection errors are
            logged and yield whatever was collected so far.
        """
        anomalies = []

        try:
            symbol = tick.symbol

            # Price outlier: z-score of the new price against the
            # rolling mean/std of the last 10 cached ticks.
            if symbol in self._tick_cache and len(self._tick_cache[symbol]) > 10:
                recent_prices = [t.price for t in list(self._tick_cache[symbol])[-10:]]
                if recent_prices:
                    mean_price = np.mean(recent_prices)
                    std_price = np.std(recent_prices)

                    if std_price > 0:
                        z_score = abs(tick.price - mean_price) / std_price
                        if z_score > self.outlier_threshold:
                            anomalies.append({
                                "type": "price_outlier",
                                "symbol": symbol,
                                "value": tick.price,
                                "expected_range": [mean_price - self.outlier_threshold * std_price,
                                                 mean_price + self.outlier_threshold * std_price],
                                "z_score": z_score,
                                "timestamp": tick.timestamp
                            })

            # Volume outlier: same z-score scheme over recent volumes.
            if hasattr(tick, 'volume') and tick.volume:
                if symbol in self._tick_cache and len(self._tick_cache[symbol]) > 10:
                    recent_volumes = [t.volume for t in list(self._tick_cache[symbol])[-10:]
                                    if hasattr(t, 'volume') and t.volume]
                    if recent_volumes:
                        mean_volume = np.mean(recent_volumes)
                        std_volume = np.std(recent_volumes)

                        if std_volume > 0:
                            z_score = abs(tick.volume - mean_volume) / std_volume
                            if z_score > self.outlier_threshold:
                                anomalies.append({
                                    "type": "volume_outlier",
                                    "symbol": symbol,
                                    "value": tick.volume,
                                    "expected_range": [mean_volume - self.outlier_threshold * std_volume,
                                                     mean_volume + self.outlier_threshold * std_volume],
                                    "z_score": z_score,
                                    "timestamp": tick.timestamp
                                })

            if anomalies:
                self._stats["anomalies_detected"] += len(anomalies)

        except Exception as e:
            self.logger.error(f"异常检测失败: {e}")

        return anomalies

    def get_stats(self) -> Dict[str, Any]:
        """Return processing counters plus per-symbol cache sizes."""
        return {
            **self._stats,
            "cache_sizes": {
                "tick_cache": {symbol: len(cache) for symbol, cache in self._tick_cache.items()},
                "bar_cache": {symbol: len(cache) for symbol, cache in self._bar_cache.items()}
            }
        }

    async def health_check(self) -> Dict[str, Any]:
        """Return a health snapshot: status, stats and cache coverage."""
        try:
            return {
                "status": "healthy",
                "engine": "RealtimeDataEngine",
                "stats": self.get_stats(),
                "cache_status": {
                    "tick_symbols": len(self._tick_cache),
                    "bar_symbols": len(self._bar_cache)
                }
            }
        except Exception as e:
            return {
                "status": "unhealthy",
                "engine": "RealtimeDataEngine",
                "error": str(e)
            }
