"""
数据预处理器
===========

提供数据清洗、填充、对齐等预处理功能。
"""

import numpy as np
from typing import Dict, List, Optional, Union, Any, Tuple
import warnings


class DataPreprocessor:
    """
    数据预处理器类
    
    负责数据清洗、缺失值处理、异常值处理等预处理任务。
    """
    
    def __init__(self):
        """Create a preprocessor with the supported method names registered."""
        # Method names accepted by handle_outliers / fill_missing_data.
        self.outlier_methods = ['clip', 'winsorize', 'remove']
        self.fill_methods = ['forward', 'backward', 'linear', 'mean', 'median', 'zero']
    
    def preprocess_all(self, data: Dict[str, np.ndarray], 
                      config: Optional[Dict[str, Any]] = None) -> Dict[str, np.ndarray]:
        """
        执行完整的数据预处理流程
        
        Parameters:
        -----------
        data : Dict[str, np.ndarray]
            输入数据
        config : Optional[Dict[str, Any]]
            预处理配置
            
        Returns:
        --------
        Dict[str, np.ndarray]
            预处理后的数据
        """
        if config is None:
            config = self._get_default_config()
        
        processed_data = data.copy()
        
        # 1. 填充缺失值
        if config.get('fill_missing', True):
            processed_data = self.fill_missing_data(
                processed_data, 
                method=config.get('fill_method', 'forward')
            )
        
        # 2. 处理异常值
        if config.get('handle_outliers', True):
            processed_data = self.handle_outliers(
                processed_data,
                method=config.get('outlier_method', 'winsorize'),
                limits=config.get('outlier_limits', (0.01, 0.99))
            )
        
        # 3. 数据对齐
        if config.get('align_timestamps', True):
            processed_data = self.align_timestamps(processed_data)
        
        # 4. 计算衍生字段
        if config.get('calculate_derived', True):
            processed_data = self.calculate_returns(processed_data)
            processed_data = self.calculate_technical_indicators(processed_data)
        
        # 5. 数据标准化
        if config.get('normalize', False):
            processed_data = self.normalize_data(processed_data)
        
        return processed_data
    
    def fill_missing_data(self, data: Dict[str, np.ndarray], 
                         method: str = 'forward') -> Dict[str, np.ndarray]:
        """
        填充缺失值
        
        Parameters:
        -----------
        data : Dict[str, np.ndarray]
            输入数据
        method : str
            填充方法 ('forward', 'backward', 'linear', 'mean', 'median', 'zero')
            
        Returns:
        --------
        Dict[str, np.ndarray]
            填充后的数据
        """
        if method not in self.fill_methods:
            raise ValueError(f"Unknown fill method: {method}. Available: {self.fill_methods}")
        
        filled_data = {}
        
        for field, values in data.items():
            if isinstance(values, np.ndarray):
                filled_data[field] = self._fill_field_missing(values, method)
            else:
                filled_data[field] = values
        
        return filled_data
    
    def _fill_field_missing(self, values: np.ndarray, method: str) -> np.ndarray:
        """填充单个字段的缺失值"""
        result = values.copy()
        n_stocks, n_days = result.shape
        
        for i in range(n_stocks):
            stock_data = result[i, :]
            missing_mask = np.isnan(stock_data)
            
            if not np.any(missing_mask):
                continue  # 没有缺失值
            
            if method == 'forward':
                # 前向填充
                for j in range(1, n_days):
                    if np.isnan(stock_data[j]) and not np.isnan(stock_data[j-1]):
                        stock_data[j] = stock_data[j-1]
            
            elif method == 'backward':
                # 后向填充
                for j in range(n_days-2, -1, -1):
                    if np.isnan(stock_data[j]) and not np.isnan(stock_data[j+1]):
                        stock_data[j] = stock_data[j+1]
            
            elif method == 'linear':
                # 线性插值
                valid_indices = ~missing_mask
                if np.sum(valid_indices) >= 2:
                    valid_values = stock_data[valid_indices]
                    valid_positions = np.where(valid_indices)[0]
                    
                    # 对缺失位置进行插值
                    missing_positions = np.where(missing_mask)[0]
                    interpolated = np.interp(missing_positions, valid_positions, valid_values)
                    stock_data[missing_positions] = interpolated
            
            elif method == 'mean':
                # 用均值填充
                valid_data = stock_data[~missing_mask]
                if len(valid_data) > 0:
                    mean_value = np.mean(valid_data)
                    stock_data[missing_mask] = mean_value
            
            elif method == 'median':
                # 用中位数填充
                valid_data = stock_data[~missing_mask]
                if len(valid_data) > 0:
                    median_value = np.median(valid_data)
                    stock_data[missing_mask] = median_value
            
            elif method == 'zero':
                # 用零填充
                stock_data[missing_mask] = 0.0
            
            result[i, :] = stock_data
        
        return result
    
    def handle_outliers(self, data: Dict[str, np.ndarray], 
                       method: str = 'winsorize',
                       limits: Tuple[float, float] = (0.01, 0.99)) -> Dict[str, np.ndarray]:
        """
        处理异常值
        
        Parameters:
        -----------
        data : Dict[str, np.ndarray]
            输入数据
        method : str
            处理方法 ('clip', 'winsorize', 'remove')
        limits : Tuple[float, float]
            分位数限制
            
        Returns:
        --------
        Dict[str, np.ndarray]
            处理后的数据
        """
        if method not in self.outlier_methods:
            raise ValueError(f"Unknown outlier method: {method}. Available: {self.outlier_methods}")
        
        processed_data = {}
        
        for field, values in data.items():
            if isinstance(values, np.ndarray):
                processed_data[field] = self._handle_field_outliers(values, method, limits)
            else:
                processed_data[field] = values
        
        return processed_data
    
    def _handle_field_outliers(self, values: np.ndarray, method: str, 
                              limits: Tuple[float, float]) -> np.ndarray:
        """处理单个字段的异常值"""
        result = values.copy()
        n_stocks, n_days = result.shape
        
        if method == 'clip' or method == 'winsorize':
            # 按日期截面处理异常值
            for j in range(n_days):
                day_data = result[:, j]
                valid_mask = ~np.isnan(day_data)
                
                if np.sum(valid_mask) > 2:
                    valid_data = day_data[valid_mask]
                    lower_bound = np.percentile(valid_data, limits[0] * 100)
                    upper_bound = np.percentile(valid_data, limits[1] * 100)
                    
                    # 截断异常值
                    clipped_data = np.clip(valid_data, lower_bound, upper_bound)
                    result[valid_mask, j] = clipped_data
        
        elif method == 'remove':
            # 将异常值设为NaN
            for j in range(n_days):
                day_data = result[:, j]
                valid_mask = ~np.isnan(day_data)
                
                if np.sum(valid_mask) > 2:
                    valid_data = day_data[valid_mask]
                    lower_bound = np.percentile(valid_data, limits[0] * 100)
                    upper_bound = np.percentile(valid_data, limits[1] * 100)
                    
                    # 标记异常值为NaN
                    outlier_mask = (day_data < lower_bound) | (day_data > upper_bound)
                    result[outlier_mask, j] = np.nan
        
        return result
    
    def align_timestamps(self, data: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
        """
        对齐时间戳（确保所有字段的时间维度一致）
        
        Parameters:
        -----------
        data : Dict[str, np.ndarray]
            输入数据
            
        Returns:
        --------
        Dict[str, np.ndarray]
            对齐后的数据
        """
        if not data:
            return data
        
        # 获取所有字段的形状
        shapes = {field: values.shape for field, values in data.items() 
                 if isinstance(values, np.ndarray)}
        
        if not shapes:
            return data
        
        # 检查是否需要对齐
        unique_shapes = list(set(shapes.values()))
        if len(unique_shapes) <= 1:
            return data  # 已经对齐
        
        # 找到最小的公共维度
        min_stocks = min(shape[0] for shape in shapes.values())
        min_days = min(shape[1] for shape in shapes.values())
        
        aligned_data = {}
        for field, values in data.items():
            if isinstance(values, np.ndarray):
                # 截取到公共维度
                aligned_data[field] = values[:min_stocks, :min_days]
            else:
                aligned_data[field] = values
        
        return aligned_data
    
    def calculate_returns(self, data: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
        """
        计算收益率
        
        Parameters:
        -----------
        data : Dict[str, np.ndarray]
            输入数据
            
        Returns:
        --------
        Dict[str, np.ndarray]
            包含收益率的数据
        """
        result = data.copy()
        
        if 'close' in data and 'returns' not in result:
            close = data['close']
            n_stocks, n_days = close.shape
            
            if n_days > 1:
                returns = np.empty_like(close)
                returns[:, 0] = np.nan  # 第一天没有收益率
                
                # 计算日收益率
                for j in range(1, n_days):
                    prev_close = close[:, j-1]
                    curr_close = close[:, j]
                    
                    # 避免除零
                    valid_mask = (prev_close > 1e-8) & (~np.isnan(prev_close)) & (~np.isnan(curr_close))
                    returns[valid_mask, j] = (curr_close[valid_mask] - prev_close[valid_mask]) / prev_close[valid_mask]
                    returns[~valid_mask, j] = np.nan
                
                result['returns'] = returns
        
        return result
    
    def calculate_technical_indicators(self, data: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
        """
        计算技术指标
        
        Parameters:
        -----------
        data : Dict[str, np.ndarray]
            输入数据
            
        Returns:
        --------
        Dict[str, np.ndarray]
            包含技术指标的数据
        """
        result = data.copy()
        
        # 计算VWAP（如果没有提供）
        if ('vwap' not in result and 
            all(field in data for field in ['high', 'low', 'close', 'volume'])):
            high = data['high']
            low = data['low']
            close = data['close']
            volume = data['volume']
            
            # 简化的VWAP计算：典型价格
            typical_price = (high + low + close) / 3.0
            result['vwap'] = typical_price
        
        # 计算20日平均成交量
        if 'volume' in data and 'adv20' not in result:
            volume = data['volume']
            adv20 = self._rolling_mean(volume, 20)
            result['adv20'] = adv20
        
        return result
    
    def _rolling_mean(self, data: np.ndarray, window: int) -> np.ndarray:
        """计算滚动均值"""
        n_stocks, n_days = data.shape
        result = np.empty_like(data)
        result[:, :window-1] = np.nan
        
        for i in range(n_stocks):
            for j in range(window-1, n_days):
                window_data = data[i, j-window+1:j+1]
                valid_data = window_data[~np.isnan(window_data)]
                
                if len(valid_data) > 0:
                    result[i, j] = np.mean(valid_data)
                else:
                    result[i, j] = np.nan
        
        return result
    
    def normalize_data(self, data: Dict[str, np.ndarray], 
                      method: str = 'zscore') -> Dict[str, np.ndarray]:
        """
        数据标准化
        
        Parameters:
        -----------
        data : Dict[str, np.ndarray]
            输入数据
        method : str
            标准化方法 ('zscore', 'minmax', 'robust')
            
        Returns:
        --------
        Dict[str, np.ndarray]
            标准化后的数据
        """
        normalized_data = {}
        
        for field, values in data.items():
            if isinstance(values, np.ndarray):
                normalized_data[field] = self._normalize_field(values, method)
            else:
                normalized_data[field] = values
        
        return normalized_data
    
    def _normalize_field(self, values: np.ndarray, method: str) -> np.ndarray:
        """标准化单个字段"""
        result = values.copy()
        n_stocks, n_days = result.shape
        
        if method == 'zscore':
            # Z-score标准化（按日期截面）
            for j in range(n_days):
                day_data = result[:, j]
                valid_mask = ~np.isnan(day_data)
                
                if np.sum(valid_mask) > 1:
                    valid_data = day_data[valid_mask]
                    mean_val = np.mean(valid_data)
                    std_val = np.std(valid_data)
                    
                    if std_val > 1e-8:
                        result[valid_mask, j] = (valid_data - mean_val) / std_val
                    else:
                        result[valid_mask, j] = 0.0
        
        elif method == 'minmax':
            # Min-Max标准化
            for j in range(n_days):
                day_data = result[:, j]
                valid_mask = ~np.isnan(day_data)
                
                if np.sum(valid_mask) > 1:
                    valid_data = day_data[valid_mask]
                    min_val = np.min(valid_data)
                    max_val = np.max(valid_data)
                    
                    if max_val > min_val:
                        result[valid_mask, j] = (valid_data - min_val) / (max_val - min_val)
                    else:
                        result[valid_mask, j] = 0.0
        
        elif method == 'robust':
            # 鲁棒标准化（使用中位数和MAD）
            for j in range(n_days):
                day_data = result[:, j]
                valid_mask = ~np.isnan(day_data)
                
                if np.sum(valid_mask) > 1:
                    valid_data = day_data[valid_mask]
                    median_val = np.median(valid_data)
                    mad = np.median(np.abs(valid_data - median_val))
                    
                    if mad > 1e-8:
                        result[valid_mask, j] = (valid_data - median_val) / mad
                    else:
                        result[valid_mask, j] = 0.0
        
        return result
    
    def _get_default_config(self) -> Dict[str, Any]:
        """Return the default configuration used by preprocess_all."""
        defaults: Dict[str, Any] = dict(
            fill_missing=True,
            fill_method='forward',
            handle_outliers=True,
            outlier_method='winsorize',
            outlier_limits=(0.01, 0.99),
            align_timestamps=True,
            calculate_derived=True,
            normalize=False,  # normalization is opt-in
        )
        return defaults
    
    def get_preprocessing_report(self, original_data: Dict[str, np.ndarray],
                               processed_data: Dict[str, np.ndarray]) -> Dict[str, Any]:
        """
        生成预处理报告
        
        Parameters:
        -----------
        original_data : Dict[str, np.ndarray]
            原始数据
        processed_data : Dict[str, np.ndarray]
            处理后数据
            
        Returns:
        --------
        Dict[str, Any]
            预处理报告
        """
        report = {
            'summary': {},
            'field_changes': {}
        }
        
        # 总体统计
        original_fields = set(original_data.keys())
        processed_fields = set(processed_data.keys())
        
        report['summary'] = {
            'original_fields': len(original_fields),
            'processed_fields': len(processed_fields),
            'added_fields': list(processed_fields - original_fields),
            'removed_fields': list(original_fields - processed_fields)
        }
        
        # 各字段变化
        for field in original_fields & processed_fields:
            orig_values = original_data[field]
            proc_values = processed_data[field]
            
            if isinstance(orig_values, np.ndarray) and isinstance(proc_values, np.ndarray):
                orig_missing = np.sum(np.isnan(orig_values))
                proc_missing = np.sum(np.isnan(proc_values))
                
                report['field_changes'][field] = {
                    'original_missing': orig_missing,
                    'processed_missing': proc_missing,
                    'missing_filled': orig_missing - proc_missing,
                    'shape_changed': orig_values.shape != proc_values.shape
                }
        
        return report