"""
数据接口
=======

提供统一的数据输入接口，支持多种数据格式的输入和标准化。
"""

import numpy as np
import pandas as pd
from typing import Dict, List, Optional, Union, Any, Tuple
import warnings


class DataInterface:
    """
    Data interface class.

    Accepts market data in several input formats (dict of arrays, pandas
    DataFrame, or a file path) and standardizes it into a dict mapping
    field names to 2D numpy arrays of shape (n_stocks, n_days).
    """

    def __init__(self):
        """Initialize the data interface with the expected field names."""
        # Fields that every dataset must provide.
        self.required_fields = ['open', 'high', 'low', 'close', 'volume']
        # Fields that are used when present but are not mandatory.
        self.optional_fields = ['vwap', 'returns', 'adv20']
        # Reserved for future caching of loaded datasets (currently unused).
        self.data_cache = {}

    def load_data(self, data: Union[Dict[str, np.ndarray], pd.DataFrame, str],
                  **kwargs) -> Dict[str, np.ndarray]:
        """
        Load data from any supported source.

        Parameters:
        -----------
        data : Union[Dict[str, np.ndarray], pd.DataFrame, str]
            Input data, one of:
            - dict: field name -> numpy array
            - DataFrame: columns are field names (or a supported layout)
            - str: a file path (csv/txt/xlsx/xls/pkl/pickle)
        **kwargs
            Extra arguments forwarded to the format-specific loader.

        Returns:
        --------
        Dict[str, np.ndarray]
            Standardized data dictionary.

        Raises:
        -------
        TypeError
            If ``data`` is not one of the supported types.
        """
        if isinstance(data, dict):
            return self._load_from_dict(data, **kwargs)
        if isinstance(data, pd.DataFrame):
            return self._load_from_dataframe(data, **kwargs)
        if isinstance(data, str):
            return self._load_from_file(data, **kwargs)
        raise TypeError(f"Unsupported data type: {type(data)}")

    def _load_from_dict(self, data: Dict[str, np.ndarray],
                       **kwargs) -> Dict[str, np.ndarray]:
        """Load data from a dict, validating and copying each field."""
        # Validate required fields, dimensionality, and shape consistency.
        self._validate_dict_data(data)

        # Copy arrays so the caller's data is never aliased or mutated.
        standardized = {}
        for field, values in data.items():
            if isinstance(values, np.ndarray):
                standardized[field] = values.copy()
            else:
                standardized[field] = np.array(values)

        return standardized

    def _load_from_dataframe(self, df: pd.DataFrame,
                           **kwargs) -> Dict[str, np.ndarray]:
        """
        Load data from a DataFrame, auto-detecting its layout.

        Three layouts are supported:
        - long format: has 'stock_id' and 'date' columns plus field columns;
        - multi-level wide format: MultiIndex columns of (field, date);
        - single-level wide format: all columns belong to one field whose
          name is supplied via the ``field_name`` keyword (default 'close').
        """
        if 'stock_id' in df.columns and 'date' in df.columns:
            # Long format: one row per (stock, date); pivot each field.
            self._check_required_columns(df.columns)
            return self._pivot_dataframe(df, **kwargs)

        if isinstance(df.columns, pd.MultiIndex):
            # Wide format with a (field, date) column hierarchy.
            self._check_required_columns(df.columns.get_level_values(0))
            return self._convert_wide_dataframe(df, **kwargs)

        # Single-level wide format: columns are dates, not field names, so
        # the required-field check does not apply.  (Previously the check
        # ran before layout detection, making this path unreachable.)
        return self._convert_wide_dataframe(df, **kwargs)

    def _check_required_columns(self, columns) -> None:
        """Raise ValueError if any required field is missing from *columns*."""
        missing_fields = [field for field in self.required_fields
                          if field not in columns]
        if missing_fields:
            raise ValueError(f"Missing required fields: {missing_fields}")

    def _load_from_file(self, filepath: str, **kwargs) -> Dict[str, np.ndarray]:
        """
        Load data from a file, dispatching on the file extension.

        ``**kwargs`` is forwarded to the pandas reader (not to the
        DataFrame conversion step).
        """
        file_ext = filepath.lower().split('.')[-1]

        if file_ext in ['csv', 'txt']:
            df = pd.read_csv(filepath, **kwargs)
            return self._load_from_dataframe(df)
        elif file_ext in ['xlsx', 'xls']:
            df = pd.read_excel(filepath, **kwargs)
            return self._load_from_dataframe(df)
        elif file_ext in ['pkl', 'pickle']:
            # NOTE: unpickling executes arbitrary code — only load trusted files.
            data = pd.read_pickle(filepath)
            if isinstance(data, pd.DataFrame):
                return self._load_from_dataframe(data)
            elif isinstance(data, dict):
                return self._load_from_dict(data)
            else:
                raise TypeError(f"Unsupported pickle data type: {type(data)}")
        else:
            raise ValueError(f"Unsupported file format: {file_ext}")

    def _validate_dict_data(self, data: Dict[str, np.ndarray]) -> None:
        """
        Validate a data dict: required fields present, every field a 2D
        array (or list convertible to one), and all shapes identical.

        Raises ValueError or TypeError on the first violation found.
        """
        # Check for required fields.
        missing_fields = [field for field in self.required_fields
                          if field not in data]
        if missing_fields:
            raise ValueError(f"Missing required fields: {missing_fields}")

        # Check element types and dimensionality, collecting shapes.
        shapes = []
        for field, values in data.items():
            if not isinstance(values, (np.ndarray, list)):
                raise TypeError(f"Field {field} must be numpy array or list")

            arr = np.asarray(values)

            if arr.ndim != 2:
                raise ValueError(f"Field {field} must be 2D array (n_stocks, n_days)")

            shapes.append(arr.shape)

        # All fields must share the same (n_stocks, n_days) shape.
        if any(shape != shapes[0] for shape in shapes):
            raise ValueError("All data fields must have the same shape")

    def _pivot_dataframe(self, df: pd.DataFrame,
                        stock_col: str = 'stock_id',
                        date_col: str = 'date',
                        **kwargs) -> Dict[str, np.ndarray]:
        """Pivot a long-format DataFrame into per-field (stock x date) arrays."""
        result = {}

        for field in self.required_fields + self.optional_fields:
            if field in df.columns:
                pivoted = df.pivot(index=stock_col, columns=date_col, values=field)
                result[field] = pivoted.values

        return result

    def _convert_wide_dataframe(self, df: pd.DataFrame,
                              **kwargs) -> Dict[str, np.ndarray]:
        """Convert a wide-format DataFrame into per-field arrays."""
        result = {}

        if isinstance(df.columns, pd.MultiIndex):
            # Multi-level columns of (field, date): slice out each field.
            for field in self.required_fields + self.optional_fields:
                if field in df.columns.get_level_values(0):
                    field_data = df[field]
                    result[field] = field_data.values
        else:
            # Single-level columns: all columns are dates for one field,
            # whose name is supplied by the caller (defaults to 'close').
            field_name = kwargs.get('field_name', 'close')
            result[field_name] = df.values

        return result

    def validate_input(self, data: Dict[str, np.ndarray]) -> bool:
        """
        Validate input data without raising.

        Parameters:
        -----------
        data : Dict[str, np.ndarray]
            Input data.

        Returns:
        --------
        bool
            True if the data is valid; on failure a warning is emitted
            and False is returned.
        """
        try:
            self._validate_dict_data(data)
            return True
        except (ValueError, TypeError) as e:
            warnings.warn(f"Data validation failed: {str(e)}")
            return False

    def normalize_format(self, data: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
        """
        Normalize data format.

        Every field becomes a C-contiguous float64 numpy array (required
        for downstream numba-optimized kernels).

        Parameters:
        -----------
        data : Dict[str, np.ndarray]
            Input data.

        Returns:
        --------
        Dict[str, np.ndarray]
            Normalized data.
        """
        # ascontiguousarray handles conversion from list, dtype casting,
        # and contiguity in one call; arrays that already satisfy all
        # constraints are returned unchanged (no copy).
        return {
            field: np.ascontiguousarray(values, dtype=np.float64)
            for field, values in data.items()
        }

    def check_completeness(self, data: Dict[str, np.ndarray],
                          min_valid_ratio: float = 0.8) -> Dict[str, float]:
        """
        Check data completeness (fraction of non-NaN elements per field).

        Parameters:
        -----------
        data : Dict[str, np.ndarray]
            Input data.
        min_valid_ratio : float
            Minimum acceptable ratio of valid data; fields below it
            trigger a warning.

        Returns:
        --------
        Dict[str, float]
            Valid-data ratio for each field.
        """
        completeness = {}

        for field, values in data.items():
            total_elements = values.size
            if np.issubdtype(values.dtype, np.floating):
                valid_elements = int(np.count_nonzero(~np.isnan(values)))
            else:
                # Integer/bool arrays cannot hold NaN (np.isnan would
                # raise TypeError on them), so they are fully valid.
                valid_elements = total_elements
            ratio = valid_elements / total_elements if total_elements > 0 else 0.0
            completeness[field] = ratio

            if ratio < min_valid_ratio:
                warnings.warn(f"Field {field} has low completeness: {ratio:.2%}")

        return completeness

    def get_data_info(self, data: Dict[str, np.ndarray]) -> Dict[str, Any]:
        """
        Summarize a data dict.

        Parameters:
        -----------
        data : Dict[str, np.ndarray]
            Input data (fields assumed to be 2D, see ``_validate_dict_data``).

        Returns:
        --------
        Dict[str, Any]
            Summary: dimensions, field lists, dtypes, memory usage, and
            completeness ratios. Empty dict for empty input.
        """
        if not data:
            return {}

        # Use the first field's shape as the reference dimensions.
        first_field = next(iter(data.values()))
        n_stocks, n_days = first_field.shape

        info = {
            'n_stocks': n_stocks,
            'n_days': n_days,
            'fields': list(data.keys()),
            'required_fields_present': all(field in data for field in self.required_fields),
            'optional_fields_present': [field for field in self.optional_fields if field in data],
            'data_types': {field: str(values.dtype) for field, values in data.items()},
            'memory_usage_mb': sum(values.nbytes for values in data.values()) / (1024 * 1024),
            'completeness': self.check_completeness(data)
        }

        return info

    def create_sample_data(self, n_stocks: int = 100, n_days: int = 252,
                          seed: int = 42) -> Dict[str, np.ndarray]:
        """
        Create synthetic OHLCV sample data (for testing).

        Parameters:
        -----------
        n_stocks : int
            Number of stocks.
        n_days : int
            Number of trading days.
        seed : int
            Random seed (reproducible for a given seed).

        Returns:
        --------
        Dict[str, np.ndarray]
            Sample data with open/high/low/close/volume/vwap fields.
        """
        # Local RandomState keeps the global numpy RNG untouched while
        # producing the exact same stream as np.random.seed + np.random.*.
        rng = np.random.RandomState(seed)

        # Generate price data: a starting price per stock plus daily returns.
        initial_prices = rng.uniform(10, 100, (n_stocks, 1))
        daily_returns = rng.normal(0, 0.02, (n_stocks, n_days - 1))

        # Compound the returns into a price path; day 0 is the initial price.
        price_ratios = np.cumprod(1 + daily_returns, axis=1)
        close = initial_prices * np.concatenate(
            [np.ones((n_stocks, 1)), price_ratios], axis=1)

        # Derive OHLC around the close.
        high = close * rng.uniform(1.0, 1.05, (n_stocks, n_days))
        low = close * rng.uniform(0.95, 1.0, (n_stocks, n_days))
        open_price = np.roll(close, 1, axis=1)
        open_price[:, 0] = close[:, 0]  # first day's open equals its close

        # Volume: log-normal, strictly positive.
        volume = rng.lognormal(10, 1, (n_stocks, n_days))

        # VWAP approximated by the typical price.
        vwap = (high + low + close) / 3.0

        data = {
            'open': open_price,
            'high': high,
            'low': low,
            'close': close,
            'volume': volume,
            'vwap': vwap
        }

        return self.normalize_format(data)