"""
增强版空调负荷预测数据预处理模块
基于Transformer架构的改进版本，包含高级时间特征工程
"""

import pandas as pd
import numpy as np
from typing import Tuple, Dict, List, Optional
import datetime
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.impute import SimpleImputer
import warnings
import json
warnings.filterwarnings('ignore')


class EnhancedHVACDataProcessor:
    """Enhanced preprocessor for HVAC (air-conditioning) load forecasting.

    Pipeline: impute missing values, replace Z-score outliers, optionally
    smooth, standardise, then derive cyclical (sin/cos), categorical
    (embedding-ready) and rolling-statistic time features suitable for a
    Transformer-style forecasting model.
    """

    def __init__(self, config: dict = None):
        """
        Initialise the enhanced data processor.

        Args:
            config: Optional (possibly partial) configuration dict; any key
                not supplied falls back to ``_default_config()``.
        """
        # Merge user overrides onto the defaults so a partial config dict
        # no longer raises KeyError when individual keys are read later.
        self.config = {**self._default_config(), **(config or {})}

        # Standardisers for the load series and the environment matrix.
        self.load_scaler = StandardScaler()
        self.env_scaler = StandardScaler()

        # Imputers: median for load (robust to spikes), mean for environment.
        self.load_imputer = SimpleImputer(strategy='median')
        self.env_imputer = SimpleImputer(strategy='mean')

        # Becomes True once fit() has been called.
        self.is_fitted = False

        # Environment feature column names.
        self.env_features = [
            'outdoor_temp',      # outdoor temperature (°C)
            'outdoor_humidity',  # outdoor relative humidity (%)
        ]

        # Holiday lookup table (extend for the target region as needed).
        self.holiday_lookup = self._create_holiday_lookup()

    def _default_config(self) -> dict:
        """Return the default processing configuration."""
        return {
            'seq_len': 168,            # input sequence length (one week)
            'pred_len': 24,            # prediction horizon (one day)
            'outlier_threshold': 3,    # Z-score threshold for outlier detection
            'missing_threshold': 0.3,  # tolerated fraction of missing values
            'smooth_window': 3,        # moving-average smoothing window
            'cyclical_encoding': True, # emit sin/cos time features
            'holiday_feature': True,   # emit holiday indicator
        }

    def _create_holiday_lookup(self) -> Dict[str, bool]:
        """
        Build the holiday lookup table.

        Simplified 2024 mainland-China calendar (keys are 'YYYY-MM-DD');
        extend according to the deployment region in production use.
        """
        holidays_2024 = {
            # National statutory holidays (example set)
            '2024-01-01': True,  # New Year's Day
            '2024-02-10': True,  # Spring Festival
            '2024-02-11': True,  # Spring Festival
            '2024-02-12': True,  # Spring Festival
            '2024-02-13': True,  # Spring Festival
            '2024-02-14': True,  # Spring Festival
            '2024-02-15': True,  # Spring Festival
            '2024-02-16': True,  # Spring Festival
            '2024-02-17': True,  # Spring Festival
            '2024-04-04': True,  # Qingming Festival
            '2024-04-05': True,  # Qingming Festival
            '2024-04-06': True,  # Qingming Festival
            '2024-05-01': True,  # Labour Day
            '2024-05-02': True,  # Labour Day
            '2024-05-03': True,  # Labour Day
            '2024-05-04': True,  # Labour Day
            '2024-05-05': True,  # Labour Day
            '2024-06-10': True,  # Dragon Boat Festival
            '2024-09-15': True,  # Mid-Autumn Festival
            '2024-09-16': True,  # Mid-Autumn Festival
            '2024-09-17': True,  # Mid-Autumn Festival
            '2024-10-01': True,  # National Day
            '2024-10-02': True,  # National Day
            '2024-10-03': True,  # National Day
            '2024-10-04': True,  # National Day
            '2024-10-05': True,  # National Day
            '2024-10-06': True,  # National Day
            '2024-10-07': True,  # National Day
        }
        return holidays_2024

    def extract_cyclical_features(self, timestamps: pd.DatetimeIndex) -> np.ndarray:
        """
        Extract cyclical time features via sine/cosine encoding.

        Args:
            timestamps: timestamp index

        Returns:
            cyclical_features: [n_samples, 6] array ordered as
            (hour_sin, hour_cos, day_sin, day_cos, year_sin, year_cos).
        """
        features = np.zeros((len(timestamps), 6))

        # Daily cycle (24 hours).
        hour_angle = 2 * np.pi * timestamps.hour / 24
        features[:, 0] = np.sin(hour_angle)
        features[:, 1] = np.cos(hour_angle)

        # Weekly cycle (7 days).
        day_angle = 2 * np.pi * timestamps.dayofweek / 7
        features[:, 2] = np.sin(day_angle)
        features[:, 3] = np.cos(day_angle)

        # Annual cycle (365 days; leap-day drift is negligible here).
        year_angle = 2 * np.pi * timestamps.dayofyear / 365
        features[:, 4] = np.sin(year_angle)
        features[:, 5] = np.cos(year_angle)

        return features

    def extract_categorical_features(self, timestamps: pd.DatetimeIndex) -> Dict[str, np.ndarray]:
        """
        Extract categorical time features (for lookup-table embeddings).

        Args:
            timestamps: timestamp index

        Returns:
            Dict of integer arrays: hour (0-23), dayofweek (0-6, 0=Monday),
            month (0-11), season (0-3), is_holiday (0/1), is_weekend (0/1),
            work_period (0=night, 1=morning, 2=afternoon, 3=evening).
        """
        # np.asarray handles both pandas Index results and plain ndarrays,
        # replacing the previous hasattr(..., 'values') boilerplate.
        hour = np.asarray(timestamps.hour)
        dayofweek = np.asarray(timestamps.dayofweek)
        month = np.asarray(timestamps.month)

        features = {
            'hour': hour,
            'dayofweek': dayofweek,
            'month': month - 1,          # shifted to 0-11 for embedding lookup
            'season': (month - 1) // 3,  # 0-3
            'is_weekend': (dayofweek >= 5).astype(int),
        }

        # Holiday flag from the lookup table (keys are 'YYYY-MM-DD').
        date_strings = timestamps.strftime('%Y-%m-%d')
        features['is_holiday'] = np.array([
            1 if date_str in self.holiday_lookup else 0
            for date_str in date_strings
        ])

        # Working-period bucket: night / morning / afternoon / evening.
        work_period = np.zeros(len(timestamps), dtype=int)
        work_period[(hour >= 6) & (hour < 12)] = 1   # morning
        work_period[(hour >= 12) & (hour < 18)] = 2  # afternoon
        work_period[(hour >= 18) & (hour < 22)] = 3  # evening
        features['work_period'] = work_period

        return features

    def extract_statistical_features(self, load_data: np.ndarray,
                                     window_sizes: Optional[List[int]] = None) -> np.ndarray:
        """
        Extract trailing-window load statistics.

        Args:
            load_data: load series
            window_sizes: statistic window sizes; defaults to [24, 168]
                (avoids the mutable-default-argument pitfall)

        Returns:
            statistical_features: [n_samples, 4 * len(window_sizes)] array;
            per window the columns are mean, std (population), min, max.
        """
        if window_sizes is None:
            window_sizes = [24, 168]

        # Vectorised via pandas rolling (min_periods=1 keeps the partial
        # leading windows, matching the original trailing-window loop).
        series = pd.Series(np.asarray(load_data, dtype=float))
        columns = []
        for window in window_sizes:
            rolling = series.rolling(window, min_periods=1)
            columns.extend([
                rolling.mean(),
                rolling.std(ddof=0),  # ddof=0 matches np.std on the raw window
                rolling.min(),
                rolling.max(),
            ])

        return np.column_stack([col.to_numpy() for col in columns])

    def detect_outliers(self, data: np.ndarray, threshold: float = 3) -> np.ndarray:
        """Flag samples whose Z-score exceeds *threshold* (boolean mask)."""
        # Small epsilon guards against division by zero on constant series.
        z_scores = np.abs((data - np.mean(data)) / (np.std(data) + 1e-8))
        return z_scores > threshold

    @staticmethod
    def _replace_outliers(data: np.ndarray, outlier_mask: np.ndarray) -> np.ndarray:
        """Replace flagged samples with the median of the remaining ones.

        Skips the replacement when no sample — or every sample — is flagged,
        which previously produced NaN (median of an empty array).
        """
        if outlier_mask.any() and not outlier_mask.all():
            data = data.copy()
            data[outlier_mask] = np.median(data[~outlier_mask])
        return data

    def smooth_data(self, data: np.ndarray, window: int = 3) -> np.ndarray:
        """Smooth *data* with a centred moving average of size *window*."""
        # A window of 1 (or shorter data) leaves the series unchanged.
        if window <= 1 or len(data) < window:
            return data

        kernel = np.ones(window) / window
        smoothed = np.convolve(data, kernel, mode='same')

        # The convolution edges average over zero-padding; keep the raw
        # values there instead of the distorted ones.
        half = window // 2
        smoothed[:half] = data[:half]
        smoothed[-half:] = data[-half:]

        return smoothed

    def fit(self, load_data: np.ndarray, env_data: np.ndarray,
            timestamps: pd.DatetimeIndex):
        """
        Fit imputers and scalers on training data.

        Args:
            load_data: load series [n_samples]
            env_data: environment matrix [n_samples, 2] (temperature, humidity)
            timestamps: timestamp index (currently unused; kept for API symmetry)
        """
        print("正在拟合增强数据预处理器...")

        # Missing-value imputation.
        load_data_clean = self.load_imputer.fit_transform(load_data.reshape(-1, 1)).flatten()
        env_data_clean = self.env_imputer.fit_transform(env_data)

        # Outlier detection and replacement.
        load_outliers = self.detect_outliers(load_data_clean, self.config['outlier_threshold'])
        print(f"检测到 {np.sum(load_outliers)} 个负荷异常值")
        load_data_clean = self._replace_outliers(load_data_clean, load_outliers)

        # Fit the standardisers on the cleaned data.
        self.load_scaler.fit(load_data_clean.reshape(-1, 1))
        self.env_scaler.fit(env_data_clean)

        self.is_fitted = True
        print("增强数据预处理器拟合完成")

    def transform(self, load_data: np.ndarray, env_data: np.ndarray,
                  timestamps: pd.DatetimeIndex) -> Dict[str, np.ndarray]:
        """
        Transform raw data into model-ready features.

        Args:
            load_data: load series
            env_data: environment matrix
            timestamps: timestamp index

        Returns:
            Dict with standardised 'load' and 'env', plus (per config)
            'cyclical_features', the categorical feature arrays and
            'statistical_features'.

        Raises:
            ValueError: if fit() has not been called yet.
        """
        if not self.is_fitted:
            raise ValueError("预处理器尚未拟合，请先调用 fit() 方法")

        # Missing-value imputation with the fitted imputers.
        load_data_clean = self.load_imputer.transform(load_data.reshape(-1, 1)).flatten()
        env_data_clean = self.env_imputer.transform(env_data)

        # Outlier replacement (median of the non-outlier samples).
        load_outliers = self.detect_outliers(load_data_clean, self.config['outlier_threshold'])
        load_data_clean = self._replace_outliers(load_data_clean, load_outliers)

        # Optional smoothing.
        if self.config['smooth_window'] > 1:
            load_data_clean = self.smooth_data(load_data_clean, self.config['smooth_window'])

        # Standardisation with the fitted scalers.
        processed_load = self.load_scaler.transform(load_data_clean.reshape(-1, 1)).flatten()
        processed_env = self.env_scaler.transform(env_data_clean)

        result = {
            'load': processed_load,
            'env': processed_env,
        }

        # Cyclical sin/cos time features.
        if self.config['cyclical_encoding']:
            result['cyclical_features'] = self.extract_cyclical_features(timestamps)

        # Categorical time features (for embedding lookups).
        result.update(self.extract_categorical_features(timestamps))

        # Rolling statistics over the cleaned (unscaled) load series.
        result['statistical_features'] = self.extract_statistical_features(load_data_clean)

        return result

    def inverse_transform_load(self, scaled_load: np.ndarray) -> np.ndarray:
        """Map a standardised load series back to the original scale."""
        return self.load_scaler.inverse_transform(scaled_load.reshape(-1, 1)).flatten()

    def create_sequences(self, processed_data: Dict[str, np.ndarray]) -> Tuple[Dict[str, np.ndarray], np.ndarray]:
        """
        Build sliding-window training sequences.

        Args:
            processed_data: output of transform()

        Returns:
            X: dict of input windows; continuous features have shape
               [n_samples, seq_len, n_feat], categorical ones [n_samples, seq_len]
            y: target windows [n_samples, pred_len, 1]

        Raises:
            ValueError: if the series is shorter than seq_len + pred_len.
        """
        seq_len = self.config['seq_len']
        pred_len = self.config['pred_len']

        load_data = processed_data['load']
        n_samples = len(load_data) - seq_len - pred_len + 1

        if n_samples <= 0:
            raise ValueError(f"数据长度不足，需要至少 {seq_len + pred_len} 个样本")

        X = {}

        # Continuous features: each window is [seq_len, n_feat].
        continuous_features = ['load', 'env', 'cyclical_features', 'statistical_features']
        for feature_name in continuous_features:
            if feature_name in processed_data:
                feature_data = processed_data[feature_name]
                if feature_data.ndim == 1:
                    feature_data = feature_data.reshape(-1, 1)

                X[feature_name] = np.zeros((n_samples, seq_len, feature_data.shape[1]))
                for i in range(n_samples):
                    X[feature_name][i] = feature_data[i:i + seq_len]

        # Categorical features: integer windows for embedding layers.
        categorical_features = ['hour', 'dayofweek', 'month', 'season',
                                'is_holiday', 'is_weekend', 'work_period']
        for feature_name in categorical_features:
            if feature_name in processed_data:
                feature_data = processed_data[feature_name]
                X[feature_name] = np.zeros((n_samples, seq_len), dtype=np.int64)
                for i in range(n_samples):
                    X[feature_name][i] = feature_data[i:i + seq_len]

        # Targets: the pred_len steps immediately after each input window.
        y = np.zeros((n_samples, pred_len, 1))
        for i in range(n_samples):
            y[i] = load_data[i + seq_len:i + seq_len + pred_len].reshape(-1, 1)

        return X, y

    def get_feature_info(self) -> Dict:
        """Describe the emitted features (names and categorical cardinalities)."""
        return {
            'env_features': self.env_features,
            'cyclical_features': ['hour_sin', 'hour_cos', 'day_sin', 'day_cos', 'year_sin', 'year_cos'],
            'categorical_features': {
                'hour': 24,
                'dayofweek': 7,
                'month': 12,
                'season': 4,
                'is_holiday': 2,
                'is_weekend': 2,
                'work_period': 4
            },
            'statistical_features': ['24h_mean', '24h_std', '24h_min', '24h_max',
                                   '168h_mean', '168h_std', '168h_min', '168h_max'],
        }


def load_enhanced_sample_data(seed: Optional[int] = None) -> Tuple[np.ndarray, np.ndarray, pd.DatetimeIndex]:
    """
    Generate enhanced synthetic sample data (60 days of hourly samples).

    Args:
        seed: Optional random seed; pass an int for reproducible data.
            The default (None) keeps the previous nondeterministic behaviour.

    Returns:
        load_data: simulated load series [n_hours]
        env_data: environment matrix [n_hours, 2] -> (temperature °C, humidity %)
        timestamps: hourly DatetimeIndex starting 2024-01-01
    """
    rng = np.random.default_rng(seed)

    # Two months of hourly data ('h' replaces the deprecated 'H' alias).
    n_hours = 24 * 60  # 60 days
    timestamps = pd.date_range('2024-01-01', periods=n_hours, freq='h')

    hours = np.arange(n_hours) % 24
    days = np.arange(n_hours) // 24

    # Base daily profile: high during the day, low at night.
    base_load = 100 + 50 * np.sin(2 * np.pi * hours / 24 - np.pi / 2)

    # Weekly modulation (weekday vs weekend).
    week_pattern = 1 + 0.2 * np.sin(2 * np.pi * days / 7)

    # Slow seasonal drift.
    season_pattern = 1 + 0.3 * np.sin(2 * np.pi * days / 365)

    # Additive noise.
    noise = rng.normal(0, 5, n_hours)

    load_data = base_load * week_pattern * season_pattern + noise

    env_data = np.zeros((n_hours, 2))

    # Temperature: seasonal mean plus intra-day swing plus noise.
    daily_temp_variation = 5 * np.sin(2 * np.pi * hours / 24 - np.pi / 2)
    seasonal_temp = 20 + 15 * np.sin(2 * np.pi * days / 365 - np.pi / 2)
    env_data[:, 0] = seasonal_temp + daily_temp_variation + rng.normal(0, 2, n_hours)

    # Humidity: anti-correlated with temperature, clipped to a realistic range.
    env_data[:, 1] = 70 - 0.5 * env_data[:, 0] + rng.normal(0, 5, n_hours)
    env_data[:, 1] = np.clip(env_data[:, 1], 20, 95)

    return load_data, env_data, timestamps


if __name__ == "__main__":
    # Demo: run the full enhanced preprocessing pipeline end to end.
    print("=== 增强版空调负荷数据预处理示例 ===")

    # Synthetic sample data.
    demo_load, demo_env, demo_ts = load_enhanced_sample_data()
    print(f"数据形状: 负荷 {demo_load.shape}, 环境 {demo_env.shape}, 时间戳 {len(demo_ts)}")

    # Fit the processor, then transform the same data.
    hvac_processor = EnhancedHVACDataProcessor()
    hvac_processor.fit(demo_load, demo_env, demo_ts)
    demo_processed = hvac_processor.transform(demo_load, demo_env, demo_ts)

    print(f"\n处理后数据形状:")
    for name, arr in demo_processed.items():
        if isinstance(arr, np.ndarray):
            print(f"  {name}: {arr.shape}")

    # Sliding-window training sequences.
    demo_X, demo_y = hvac_processor.create_sequences(demo_processed)

    print(f"\n训练序列形状:")
    for name, arr in demo_X.items():
        print(f"  X_{name}: {arr.shape}")
    print(f"  y: {demo_y.shape}")

    # Feature metadata.
    print(f"\n特征信息:")
    for name, info in hvac_processor.get_feature_info().items():
        print(f"  {name}: {info}")
