# dragonquant/data_handler.py

import pandas as pd
import numpy as np
import os
import logging
from abc import ABC, abstractmethod
from typing import Dict, List, Optional, Union

# Module-level logging setup.
# NOTE(review): calling logging.basicConfig() at import time in a library
# module configures the root logger as a side effect; consider leaving this
# to the application entry point — confirm no other module depends on it.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class BaseDataHandler(ABC):
    """Abstract base class for data handlers, defining the common interface.

    Provides shared OHLC validation/cleaning, an in-memory query cache, and
    deterministic cache-key generation for concrete handler subclasses.
    """

    def __init__(self):
        self.cache = {}           # query-result cache: cache_key -> DataFrame
        self.cache_enabled = True

    @abstractmethod
    def history(self, stock: str, fields: Union[str, List[str]], bar_count: int, dt) -> pd.DataFrame:
        """Return up to `bar_count` rows of historical data for `stock` at or before `dt`."""
        pass

    @abstractmethod
    def get_current_prices(self, dt) -> Dict[str, float]:
        """Return a mapping of stock symbol -> close price for trading day `dt`."""
        pass

    def validate_data(self, df: pd.DataFrame, stock: str) -> pd.DataFrame:
        """Validate and clean an OHLC DataFrame, returning a cleaned copy.

        Steps: require the OHLC columns, coerce them to numeric, fill NaNs
        (forward then backward), repair rows where high/low violate the OHLC
        ordering, drop duplicate index entries (keeping the last), and sort
        by index. The input frame is never mutated.

        Args:
            df: price data indexed by date.
            stock: symbol, used only for log messages.

        Returns:
            The cleaned DataFrame (the input object if it was empty).

        Raises:
            ValueError: if any required OHLC column is missing.
        """
        if df.empty:
            logger.warning(f"Empty data for {stock}")
            return df

        required_columns = ['open', 'high', 'low', 'close']
        missing_columns = [col for col in required_columns if col not in df.columns]
        if missing_columns:
            raise ValueError(f"Missing required columns for {stock}: {missing_columns}")

        # Work on a copy so the caller's frame is never modified in place
        # (the original wrote through to the argument).
        df = df.copy()

        # Coerce non-numeric price columns; unparseable values become NaN
        # and are handled by the fill step below.
        for col in required_columns:
            if not pd.api.types.is_numeric_dtype(df[col]):
                logger.warning(f"Converting {col} to numeric for {stock}")
                df[col] = pd.to_numeric(df[col], errors='coerce')

        # Fill missing prices. fillna(method=...) is deprecated since
        # pandas 2.1 (removed in 3.0); use ffill()/bfill() instead.
        if df[required_columns].isnull().any().any():
            logger.warning(f"Found NaN values in {stock}, forward filling...")
            df[required_columns] = df[required_columns].ffill()
            df[required_columns] = df[required_columns].bfill()

        # Repair rows where high is not the max / low is not the min of OHLC.
        invalid_prices = (df['high'] < df['low']) | (df['high'] < df['open']) | (df['high'] < df['close']) | \
                        (df['low'] > df['open']) | (df['low'] > df['close'])
        if invalid_prices.any():
            logger.warning(f"Found invalid price data in {stock}, fixing...")
            df.loc[invalid_prices, 'high'] = df.loc[invalid_prices, ['open', 'close']].max(axis=1)
            df.loc[invalid_prices, 'low'] = df.loc[invalid_prices, ['open', 'close']].min(axis=1)

        # Drop duplicated index entries, keeping the most recent row.
        if df.index.duplicated().any():
            logger.warning(f"Found duplicate dates in {stock}, removing...")
            df = df[~df.index.duplicated(keep='last')]

        # Ensure chronological order for date-based slicing in subclasses.
        return df.sort_index()

    def get_cache_key(self, stock: str, fields: Union[str, List[str]], bar_count: int, dt) -> str:
        """Build a deterministic cache key; list fields are sorted so their order is irrelevant."""
        fields_str = '_'.join(sorted(fields)) if isinstance(fields, list) else fields
        return f"{stock}_{fields_str}_{bar_count}_{dt.strftime('%Y%m%d')}"

    def clear_cache(self):
        """Drop all cached query results."""
        self.cache.clear()
        logger.info("Data cache cleared")

class RealDataHandler(BaseDataHandler):
    """Loads real market data from local CSV files and serves it to callers.

    Expects one CSV per stock at `<data_path>/<stock>.csv` containing a
    'date' column plus OHLC columns; everything is preloaded into memory.
    """

    def __init__(self, universe: List[str], data_path: str):
        """
        Args:
            universe: stock symbols to load.
            data_path: directory containing the per-stock CSV files.
        """
        super().__init__()
        self.universe = universe
        self.data_path = data_path
        self.all_data = self._load_all_data()
        logger.info(f"RealDataHandler initialized with {len(self.all_data)} stocks")

    def _load_all_data(self) -> Dict[str, pd.DataFrame]:
        """Preload CSV data for every stock in the universe into memory.

        Returns:
            Mapping of stock symbol -> cleaned, date-indexed, sorted DataFrame.

        Raises:
            FileNotFoundError: if `data_path` does not exist or no file loads.
        """
        if not os.path.exists(self.data_path):
            raise FileNotFoundError(f"Data path does not exist: {self.data_path}")

        logger.info(f"Loading data from '{self.data_path}'...")
        data = {}
        failed_stocks = []

        for stock in self.universe:
            try:
                file_path = os.path.join(self.data_path, f"{stock}.csv")
                if not os.path.exists(file_path):
                    logger.warning(f"Data file not found for {stock}, skipping.")
                    failed_stocks.append(stock)
                    continue

                df = pd.read_csv(file_path)

                # Normalize the date column into a sorted DatetimeIndex.
                if 'date' not in df.columns:
                    raise ValueError(f"Missing 'date' column in {stock}.csv")

                df['date'] = pd.to_datetime(df['date'], errors='coerce')
                if df['date'].isnull().any():
                    raise ValueError(f"Invalid dates found in {stock}.csv")

                df.set_index('date', inplace=True)
                df.sort_index(inplace=True)

                # Shared OHLC validation/cleaning from the base class.
                df = self.validate_data(df, stock)

                if len(df) == 0:
                    logger.warning(f"No valid data for {stock} after cleaning")
                    failed_stocks.append(stock)
                    continue

                data[stock] = df
                logger.info(f"  - Loaded {stock} ({len(df)} rows)")

            except Exception as e:
                # Best-effort loading: one bad file must not abort the rest.
                logger.error(f"Failed to load data for {stock}: {str(e)}")
                failed_stocks.append(stock)
                continue

        if not data:
            raise FileNotFoundError("No data files were loaded. Please check the 'data_path' and file names.")

        if failed_stocks:
            logger.warning(f"Failed to load data for {len(failed_stocks)} stocks: {failed_stocks}")

        return data

    def history(self, stock: str, fields: Union[str, List[str]], bar_count: int, dt) -> pd.DataFrame:
        """Return the last `bar_count` rows of `fields` for `stock` up to `dt`.

        Results are cached per (stock, fields, bar_count, date). Returns an
        empty DataFrame on any failure (unknown stock, missing field, no data
        before `dt`) rather than raising.
        """
        # Serve from cache when possible.
        if self.cache_enabled:
            cache_key = self.get_cache_key(stock, fields, bar_count, dt)
            if cache_key in self.cache:
                # Return a copy so callers cannot mutate the cached frame
                # (the original returned the cached object itself, letting a
                # caller's in-place edit corrupt subsequent cache hits).
                return self.cache[cache_key].copy()

        try:
            if stock not in self.all_data:
                logger.warning(f"No data available for {stock}")
                return pd.DataFrame()

            df = self.all_data[stock]

            # Keep only rows at or before the requested date.
            filtered_df = df.loc[:dt]

            if len(filtered_df) == 0:
                logger.warning(f"No data available for {stock} before {dt}")
                return pd.DataFrame()

            # Select the requested field(s) and trailing window.
            if isinstance(fields, str):
                if fields not in filtered_df.columns:
                    logger.error(f"Field '{fields}' not found for {stock}")
                    return pd.DataFrame()
                result = filtered_df[[fields]].tail(bar_count)
            else:
                missing_fields = [f for f in fields if f not in filtered_df.columns]
                if missing_fields:
                    logger.error(f"Fields {missing_fields} not found for {stock}")
                    return pd.DataFrame()
                result = filtered_df[fields].tail(bar_count)

            # Store a private copy in the cache.
            if self.cache_enabled:
                self.cache[cache_key] = result.copy()

            return result

        except Exception as e:
            logger.error(f"Error getting history for {stock}: {str(e)}")
            return pd.DataFrame()

    def get_current_prices(self, dt) -> Dict[str, float]:
        """Return {stock: close price} for every universe stock with a valid,
        positive close on `dt`; stocks without data are silently skipped."""
        prices = {}

        for stock in self.universe:
            try:
                if stock in self.all_data and dt in self.all_data[stock].index:
                    price = self.all_data[stock].at[dt, 'close']
                    if pd.notna(price) and price > 0:
                        prices[stock] = float(price)
                    else:
                        logger.warning(f"Invalid price for {stock} on {dt}: {price}")
                else:
                    logger.debug(f"No price data for {stock} on {dt}")
            except Exception as e:
                logger.error(f"Error getting current price for {stock}: {str(e)}")
                continue

        return prices


class FakeDataHandler(BaseDataHandler):
    """Generates synthetic OHLCV data in memory; intended for testing.

    Close prices follow a geometric random walk with per-stock random drift
    and volatility; highs/lows are clamped so the OHLC ordering invariant
    holds by construction.
    """

    def __init__(self, universe: List[str], start_date: str, end_date: str, seed: int = 42):
        """
        Args:
            universe: stock symbols to generate data for.
            start_date: first date of the range (inclusive).
            end_date: last date of the range (inclusive); business days only.
            seed: RNG seed so generated data is reproducible.
        """
        super().__init__()
        self.universe = universe
        self.start_date = start_date
        self.end_date = end_date

        # NOTE(review): this seeds NumPy's *global* RNG; a local Generator
        # (np.random.default_rng) would avoid cross-module interference but
        # would change the generated series for a given seed — confirm no
        # test depends on the exact values before switching.
        np.random.seed(seed)

        self.dates = pd.date_range(self.start_date, self.end_date, freq='B')
        self.all_data = self._generate_fake_data()
        logger.info(f"FakeDataHandler initialized with {len(self.universe)} stocks, {len(self.dates)} trading days")

    def _generate_fake_data(self) -> Dict[str, pd.DataFrame]:
        """Build one synthetic OHLCV DataFrame per stock in the universe."""
        data = {}

        for stock in self.universe:
            try:
                # Per-stock random-walk parameters.
                initial_price = np.random.uniform(20, 200)
                volatility = np.random.uniform(0.01, 0.05)
                trend = np.random.uniform(-0.0005, 0.0005)

                # Close prices: geometric random walk.
                returns = np.random.normal(trend, volatility, len(self.dates))
                prices = initial_price * np.cumprod(1 + returns)

                df_data = {
                    'close': prices,
                    'open': prices * (1 + np.random.normal(0, 0.005, len(self.dates))),
                    'volume': np.random.lognormal(10, 1, len(self.dates))
                }

                # High/low clamped to contain both open and close, so the
                # OHLC invariant holds immediately. (The original re-applied
                # the same min/max a second time afterwards — a redundant
                # no-op that has been removed; it drew no random numbers,
                # so generated values are unchanged.)
                df_data['high'] = np.maximum(
                    np.maximum(df_data['open'], df_data['close']),
                    df_data['close'] * (1 + np.random.exponential(0.01, len(self.dates)))
                )

                df_data['low'] = np.minimum(
                    np.minimum(df_data['open'], df_data['close']),
                    df_data['close'] * (1 - np.random.exponential(0.01, len(self.dates)))
                )

                df = pd.DataFrame(df_data, index=self.dates)

                # Shared validation from the base class (clean by construction,
                # so this is normally a no-op).
                df = self.validate_data(df, stock)

                data[stock] = df

            except Exception as e:
                # Best-effort: fall back to trivially valid flat data so the
                # handler always has a frame for every stock.
                logger.error(f"Error generating fake data for {stock}: {str(e)}")
                simple_prices = np.random.uniform(50, 150, len(self.dates))
                df = pd.DataFrame({
                    'open': simple_prices,
                    'high': simple_prices * 1.02,
                    'low': simple_prices * 0.98,
                    'close': simple_prices,
                    'volume': np.random.uniform(1000000, 10000000, len(self.dates))
                }, index=self.dates)
                data[stock] = df

        return data

    def history(self, stock: str, fields: Union[str, List[str]], bar_count: int, dt) -> pd.DataFrame:
        """Return the last `bar_count` rows of `fields` for `stock` up to `dt`.

        Mirrors RealDataHandler.history but without caching; returns an empty
        DataFrame on any failure rather than raising.
        """
        try:
            if stock not in self.all_data:
                logger.warning(f"No fake data available for {stock}")
                return pd.DataFrame()

            df = self.all_data[stock]
            # Keep only rows at or before the requested date.
            filtered_df = df.loc[:dt]

            if len(filtered_df) == 0:
                return pd.DataFrame()

            # Select the requested field(s) and trailing window.
            if isinstance(fields, str):
                if fields not in filtered_df.columns:
                    logger.error(f"Field '{fields}' not found in fake data for {stock}")
                    return pd.DataFrame()
                result = filtered_df[[fields]].tail(bar_count)
            else:
                missing_fields = [f for f in fields if f not in filtered_df.columns]
                if missing_fields:
                    logger.error(f"Fields {missing_fields} not found in fake data for {stock}")
                    return pd.DataFrame()
                result = filtered_df[fields].tail(bar_count)

            return result

        except Exception as e:
            logger.error(f"Error getting fake history for {stock}: {str(e)}")
            return pd.DataFrame()

    def get_current_prices(self, dt) -> Dict[str, float]:
        """Return {stock: close price} for every universe stock with a valid,
        positive close on `dt`; stocks without data are silently skipped."""
        prices = {}

        for stock in self.universe:
            try:
                if stock in self.all_data and dt in self.all_data[stock].index:
                    price = self.all_data[stock].at[dt, 'close']
                    if pd.notna(price) and price > 0:
                        prices[stock] = float(price)
                    else:
                        logger.warning(f"Invalid fake price for {stock} on {dt}: {price}")
            except Exception as e:
                logger.error(f"Error getting fake current price for {stock}: {str(e)}")
                continue

        return prices