from abc import ABC, abstractmethod
import dask.dataframe as dd
from typing import Dict, List
import numpy as np
import pandas as pd
from ..calculation.technical import TechnicalIndicator
from ..calculation.vectorized import VectorizedOperations
from ..storage.factor_storage import FactorStorage

class FactorBase(ABC):
    """Abstract base class for factor computation.

    Subclasses implement :meth:`calculate`; :meth:`update` provides a
    default incremental-update path that simply recomputes on new data.
    """

    def __init__(self, name: str, dependencies: "List[str] | None" = None):
        """
        Args:
            name: unique factor name, used as the registry key.
            dependencies: names of factors this factor depends on.
                Defaults to no dependencies.
        """
        self.name = name
        # BUG FIX: the original used a mutable default argument (`= []`),
        # so every instance created without `dependencies` shared ONE list.
        # Use a None sentinel and copy the caller's list to avoid aliasing.
        self.dependencies = list(dependencies) if dependencies is not None else []
        self.storage = FactorStorage()

    @abstractmethod
    def calculate(self, data: "dd.DataFrame") -> "dd.Series":
        """Core computation: return the factor series for `data`."""

    def update(self, new_data: "dd.DataFrame"):
        """Incremental update: default implementation recomputes from `new_data`."""
        return self.calculate(new_data)

class MissingDependencyError(KeyError):
    """Raised when a factor is registered whose dependency is not yet registered."""


class FactorEngine:
    """Factor computation engine.

    Holds a registry of :class:`FactorBase` instances and drives batch
    (dask ``map_partitions``) and streaming computation over them.
    """

    def __init__(self, parallel_workers: int = 4):
        """
        Args:
            parallel_workers: intended degree of parallelism
                (currently informational; dask manages actual scheduling).
        """
        self.factors: Dict[str, "FactorBase"] = {}
        self.parallel_workers = parallel_workers
        self.technical = TechnicalIndicator()
        self.vectorized = VectorizedOperations()

    def register_factor(self, factor: "FactorBase") -> None:
        """Register a factor calculator, validating its dependencies first.

        Raises:
            MissingDependencyError: if any dependency is not registered.
        """
        # BUG FIX: validate BEFORE inserting. The original inserted first,
        # so a factor with missing dependencies was left half-registered
        # when validation raised.
        self._validate_dependencies(factor)
        self.factors[factor.name] = factor

    def _validate_dependencies(self, factor: "FactorBase") -> None:
        """Raise MissingDependencyError for any unregistered dependency."""
        # BUG FIX: MissingDependencyError was raised here but never defined
        # anywhere, turning every validation failure into a NameError.
        # It is now defined above (subclass of KeyError).
        for dep in factor.dependencies:
            if dep not in self.factors:
                raise MissingDependencyError(f"缺失依赖因子: {dep}")

    def compute_batch(self, data: "dd.DataFrame", factors: List[str]) -> "dd.DataFrame":
        """Compute the named factors over `data`, partition by partition.

        Returns a persisted dask DataFrame with one column per factor.
        """
        # NOTE(review): `meta=object` is not a valid partition prototype for
        # map_partitions returning a DataFrame — dask expects an empty
        # DataFrame/dtype mapping. Kept as-is to avoid guessing the output
        # schema; confirm and supply a proper `meta` with real columns.
        return data.map_partitions(
            self._compute_partition,
            factors=factors,
            meta=object,
        ).persist()

    def _compute_partition(self, partition: "pd.DataFrame", factors: List[str]):
        """Per-partition computation: each partition is a *pandas* DataFrame."""
        results = {}
        for factor_name in factors:
            factor = self.factors[factor_name]
            # BUG FIX: the original passed dependency columns as extra
            # keyword arguments (`factor.calculate(partition, **dep_data)`),
            # but FactorBase.calculate accepts only `data` — a guaranteed
            # TypeError for any factor with dependencies. Factors read the
            # columns they need directly from the partition.
            results[factor_name] = factor.calculate(partition)
        # BUG FIX: was dd.concat — partitions are pandas objects, and
        # dd.concat does not accept a dict. pd.concat(dict, axis=1) builds
        # one column per factor, named by the dict keys.
        return pd.concat(results, axis=1)

    def compute_streaming(self, tick_data: Dict):
        """Real-time streaming computation over factors flagged as streaming."""
        # BUG FIX: no factor class defines `is_streaming`, so the original
        # attribute access raised AttributeError; default to False.
        return {
            factor.name: factor.update(tick_data)
            for factor in self.factors.values()
            if getattr(factor, "is_streaming", False)
        }

class TechnicalFactor(FactorBase):
    """Technical-indicator factor (MA / RSI / MACD)."""

    def __init__(self, config: Dict):
        """
        Args:
            config: configuration dict with keys:
                'name'   — factor name (required);
                'type'   — indicator type, one of 'MA', 'RSI', 'MACD' (required);
                'params' — keyword arguments forwarded to the indicator (required);
                'deps'   — optional list of dependency factor names.
        """
        super().__init__(config['name'], config.get('deps', []))
        self.indicator_type = config['type']
        self.params = config['params']
        # BUG FIX: calculate() reads self.technical, but nothing in the
        # class hierarchy ever assigned it — AttributeError on first use.
        self.technical = TechnicalIndicator()

    def calculate(self, data: "dd.DataFrame") -> "dd.Series":
        """Dispatch to the configured indicator implementation.

        Raises:
            KeyError: if the configured indicator type is unsupported.
        """
        calc_map = {
            'MA': self.technical.moving_average,
            'RSI': self.technical.rsi,
            'MACD': self.technical.macd,
        }
        return calc_map[self.indicator_type](data, **self.params)

class CrossSectionalFactor(FactorBase):
    """Cross-sectional factor: per-date percentile rank of close, z-scored."""

    def __init__(self, name: str, dependencies: "List[str] | None" = None):
        """Same signature as FactorBase — backward compatible for callers."""
        super().__init__(name, dependencies or [])
        # BUG FIX: calculate() reads self.vectorized, but nothing in the
        # class hierarchy ever assigned it — AttributeError on first use.
        self.vectorized = VectorizedOperations()

    def calculate(self, data: "dd.DataFrame") -> "dd.Series":
        """Rank 'close' within each 'date' group, then z-score the ranks."""
        return self.vectorized.zscore(
            data.groupby('date').apply(self._calc_rank)
        )

    def _calc_rank(self, df: "pd.DataFrame") -> "pd.Series":
        """Percentile rank (0, 1] of 'close' within one date's cross-section."""
        return df['close'].rank(pct=True)