"""
高级AI需求预测模块
集成LSTM、Transformer和集成学习模型，提供高精度的需求预测
"""

import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout, GRU, Conv1D, MaxPooling1D, Flatten
from tensorflow.keras.optimizers import Adam
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_absolute_error, mean_squared_error
import xgboost as xgb
from prophet import Prophet
import warnings
warnings.filterwarnings('ignore')

class DataPreprocessor:
    """Preprocesses raw demand history into model-ready time-series data."""

    def __init__(self):
        # Scaler for the target series and a separate one for exogenous features.
        self.scaler = MinMaxScaler()
        self.feature_scaler = StandardScaler()

        # TODO: [preprocessing roadmap] batch multi-SKU processing
        # (multiprocessing), external data sources (weather / holidays /
        # promotions via third-party APIs), statistical + ML outlier
        # detection, and aggregation at hourly/daily/weekly/monthly
        # granularities with pipeline quality reporting.

    def create_features(self, df):
        """Return a copy of *df* enriched with calendar, lag and rolling features.

        Expects 'date' and 'quantity' columns; rows made incomplete by the
        lag/rolling windows are dropped, so the result is shorter than the input.
        """
        out = df.copy()
        out['date'] = pd.to_datetime(out['date'])
        out = out.sort_values('date')

        # TODO: [feature roadmap] holiday/promotion flags, competitor price
        # and market-trend data, supplier lead-time / stock-level indicators,
        # and customer-behaviour features are planned but not implemented.

        # Calendar features derived from the timestamp.
        out['dayofweek'] = out['date'].dt.dayofweek
        out['month'] = out['date'].dt.month
        out['quarter'] = out['date'].dt.quarter
        out['year'] = out['date'].dt.year
        out['dayofyear'] = out['date'].dt.dayofyear

        # TODO: holiday features (is_holiday, days_to_holiday) via a holiday API.

        # Lagged demand values.
        for k in (1, 3, 7, 14, 30):
            out[f'lag_{k}'] = out['quantity'].shift(k)

        # TODO: cross-lag ratio features, e.g. lag_1 / lag_7.

        # Rolling-window summary statistics of demand.
        for w in (7, 14, 30):
            roll = out['quantity'].rolling(window=w)
            out[f'rolling_mean_{w}'] = roll.mean()
            out[f'rolling_std_{w}'] = roll.std()
            out[f'rolling_max_{w}'] = roll.max()
            out[f'rolling_min_{w}'] = roll.min()
            # TODO: higher-order stats (rolling skew / kurtosis).

        # Price-derived features, only when a price column is supplied.
        if 'price' in out.columns:
            out['price_change'] = out['price'].pct_change()
            out['price_ma_7'] = out['price'].rolling(window=7).mean()
            # TODO: price-elasticity feature.

        # TODO: external features (weather, economic indicators, competitors).

        # Drop rows left incomplete by shifting/rolling.
        return out.dropna()

    def create_sequences(self, data, target_col, sequence_length=30, forecast_horizon=7):
        """Slice *data* (a 2-D array) into (X, y) windows for sequence models.

        X holds `sequence_length`-long input windows; y holds the following
        `forecast_horizon` values of column index `target_col`.
        """
        n_windows = len(data) - sequence_length - forecast_horizon + 1
        windows = [data[i:i + sequence_length] for i in range(n_windows)]
        targets = [
            data[i + sequence_length:i + sequence_length + forecast_horizon, target_col]
            for i in range(n_windows)
        ]
        return np.array(windows), np.array(targets)

class LSTMPredictor:
    """Stacked-LSTM model for direct multi-step demand forecasting.

    Consumes windows of `sequence_length` scaled observations and predicts
    the next `forecast_horizon` values in one shot.
    """

    def __init__(self, sequence_length=30, forecast_horizon=7, features=1):
        self.sequence_length = sequence_length    # input window length
        self.forecast_horizon = forecast_horizon  # steps predicted per window
        self.features = features                  # variables per timestep
        self.model = None                         # built lazily in train()
        self.scaler = MinMaxScaler()              # fit in prepare_data()

        # TODO: [LSTM roadmap] uncertainty quantification (Bayesian LSTM
        # prediction intervals), Optuna hyper-parameter search, attention /
        # Transformer layers, and multivariate co-forecasting.

    def build_model(self, lstm_units=(128, 64, 32), dropout_rate=0.2):
        """Return a compiled three-layer LSTM regressor.

        Note: the default for `lstm_units` is a tuple to avoid the
        mutable-default-argument pitfall; lists still work for callers.
        """
        model = Sequential([
            LSTM(lstm_units[0], return_sequences=True,
                 input_shape=(self.sequence_length, self.features)),
            Dropout(dropout_rate),
            LSTM(lstm_units[1], return_sequences=True),
            Dropout(dropout_rate),
            LSTM(lstm_units[2]),
            Dropout(dropout_rate),
            Dense(64, activation='relu'),
            Dense(32, activation='relu'),
            Dense(self.forecast_horizon, activation='linear'),
        ])

        model.compile(optimizer=Adam(learning_rate=0.001), loss='mse', metrics=['mae'])
        return model

    def prepare_data(self, df, target_col='quantity'):
        """Scale df[target_col] and window it into (X, y) training arrays.

        Returns:
            X of shape (n, sequence_length, 1) and y of shape (n, forecast_horizon).
        """
        # Normalize the target to [0, 1]; the scaler is kept for inverse mapping.
        scaled_data = self.scaler.fit_transform(df[[target_col]])

        # Bug fix: `sequence_length` was referenced as a bare name here
        # (NameError at runtime); it must come from the instance.
        seq_len = self.sequence_length
        horizon = self.forecast_horizon

        X, y = [], []
        for i in range(len(scaled_data) - seq_len - horizon + 1):
            X.append(scaled_data[i:i + seq_len])
            y.append(scaled_data[i + seq_len:i + seq_len + horizon, 0])

        return np.array(X), np.array(y)

    def train(self, X_train, y_train, X_val=None, y_val=None, epochs=100, batch_size=32):
        """Build the model and fit it; returns the Keras History object.

        Validation data is optional; when omitted, early stopping monitors
        the training loss instead.
        """
        self.model = self.build_model()

        callbacks = [
            tf.keras.callbacks.EarlyStopping(patience=15, restore_best_weights=True),
            tf.keras.callbacks.ReduceLROnPlateau(factor=0.5, patience=5, min_lr=0.0001),
        ]

        # Keras accepts validation_data=None, so both cases share one fit() call.
        validation = (X_val, y_val) if X_val is not None and y_val is not None else None
        return self.model.fit(
            X_train, y_train,
            validation_data=validation,
            epochs=epochs,
            batch_size=batch_size,
            callbacks=callbacks,
            verbose=0,
        )

    def predict(self, X):
        """Predict and map results back to the original scale.

        Raises:
            ValueError: if called before train().
        """
        if self.model is None:
            raise ValueError("Model not trained yet")

        predictions = self.model.predict(X)

        # Undo the MinMax scaling column-wise, preserving the output shape.
        return self.scaler.inverse_transform(
            predictions.reshape(-1, 1)
        ).reshape(predictions.shape)

class TransformerPredictor:
    """Transformer-encoder model for direct multi-step time-series forecasting."""

    def __init__(self, sequence_length=30, forecast_horizon=7, d_model=128, num_heads=8):
        self.sequence_length = sequence_length    # input window length
        self.forecast_horizon = forecast_horizon  # steps predicted per window
        self.d_model = d_model                    # model (embedding) width
        self.num_heads = num_heads                # attention heads; d_model should divide evenly
        self.model = None                         # built lazily in train()
        self.scaler = MinMaxScaler()

    def build_model(self):
        """Return a compiled two-block Transformer encoder regressor."""
        inputs = tf.keras.layers.Input(shape=(self.sequence_length, 1))

        # Learned positional encoding; broadcasts over the batch dimension.
        positions = tf.range(start=0, limit=self.sequence_length, delta=1)
        pos_encoding = tf.keras.layers.Embedding(
            input_dim=self.sequence_length,
            output_dim=self.d_model
        )(positions)

        # Project the scalar series into the model dimension, add positions.
        x = tf.keras.layers.Dense(self.d_model)(inputs)
        x = x + pos_encoding

        # Post-norm Transformer encoder blocks.
        for _ in range(2):
            # Self-attention sub-layer.
            attn_output = tf.keras.layers.MultiHeadAttention(
                num_heads=self.num_heads,
                key_dim=self.d_model // self.num_heads
            )(x, x)
            attn_output = tf.keras.layers.Dropout(0.1)(attn_output)
            # Bug fix: the residual connection was missing — the sub-layer
            # output previously REPLACED x instead of being added to it,
            # so LayerNorm saw only the attention output.
            x = tf.keras.layers.LayerNormalization()(x + attn_output)

            # Position-wise feed-forward sub-layer.
            ffn = tf.keras.Sequential([
                tf.keras.layers.Dense(self.d_model * 4, activation='relu'),
                tf.keras.layers.Dense(self.d_model)
            ])
            ffn_output = tf.keras.layers.Dropout(0.1)(ffn(x))
            # Bug fix: residual connection restored here as well.
            x = tf.keras.layers.LayerNormalization()(x + ffn_output)

        # Pool over time, then regress all horizon steps at once.
        x = tf.keras.layers.GlobalAveragePooling1D()(x)
        outputs = tf.keras.layers.Dense(self.forecast_horizon, activation='linear')(x)

        model = tf.keras.Model(inputs=inputs, outputs=outputs)
        model.compile(optimizer=Adam(learning_rate=0.001), loss='mse')

        return model

    def train(self, X_train, y_train, X_val=None, y_val=None, epochs=100, batch_size=32):
        """Build the model and fit it; returns the Keras History object.

        Validation data is optional; when omitted, early stopping monitors
        the training loss instead.
        """
        self.model = self.build_model()

        callbacks = [
            tf.keras.callbacks.EarlyStopping(patience=15, restore_best_weights=True),
            tf.keras.callbacks.ReduceLROnPlateau(factor=0.5, patience=5, min_lr=0.0001),
        ]

        # Keras accepts validation_data=None, so both cases share one fit() call.
        validation = (X_val, y_val) if X_val is not None and y_val is not None else None
        return self.model.fit(
            X_train, y_train,
            validation_data=validation,
            epochs=epochs,
            batch_size=batch_size,
            callbacks=callbacks,
            verbose=0,
        )

    def predict(self, X):
        """Predict (still in scaled units — no inverse transform is applied here).

        Raises:
            ValueError: if called before train().
        """
        if self.model is None:
            raise ValueError("Model not trained yet")

        return self.model.predict(X)

class EnsembleForecaster:
    """Weighted ensemble over deep wrappers and sklearn-style estimators.

    Deep models (objects exposing a custom `train` method, e.g. LSTM /
    Transformer wrappers) receive the raw 3-D sequence input; sklearn-style
    estimators receive the same windows flattened to 2-D.
    """

    def __init__(self):
        self.models = {}    # name -> model instance
        self.weights = {}   # name -> non-negative blending weight
        self.scaler = MinMaxScaler()

    def add_model(self, name, model, weight=1.0):
        """Register *model* under *name* with the given blending weight."""
        self.models[name] = model
        self.weights[name] = weight

    def train_all(self, X_train, y_train, X_val=None, y_val=None):
        """Train every registered model; returns histories keyed by model name."""
        histories = {}

        for name, model in self.models.items():
            if hasattr(model, 'train'):
                # Custom deep-learning wrapper (LSTM / Transformer).
                histories[name] = model.train(X_train, y_train, X_val, y_val)
            else:
                # sklearn-style estimator: flatten each window to a feature row.
                model.fit(X_train.reshape(X_train.shape[0], -1), y_train)

        return histories

    def _collect_predictions(self, X):
        """Run every model on X, flattening the input for sklearn-style models.

        Bug fix: models are distinguished by the presence of a custom `train`
        method (mirroring train_all). The previous `hasattr(model, 'predict')`
        test was always true — sklearn estimators also define predict() — so
        they were incorrectly fed the unflattened 3-D input.
        """
        predictions = {}
        for name, model in self.models.items():
            if hasattr(model, 'train'):
                predictions[name] = model.predict(X)
            else:
                predictions[name] = model.predict(X.reshape(X.shape[0], -1))
        return predictions

    def predict_with_weights(self, X):
        """Return the weight-averaged prediction of all registered models.

        Raises:
            ValueError: if no models are registered or all weights are zero
            (previously this produced a silent division by zero).
        """
        predictions = self._collect_predictions(X)

        total_weight = sum(self.weights[name] for name in predictions)
        if not predictions or total_weight <= 0:
            raise ValueError("No models with positive weight to ensemble")

        weighted_pred = sum(self.weights[name] * pred
                            for name, pred in predictions.items())
        return weighted_pred / total_weight

    def optimize_weights(self, X_val, y_val):
        """Fit blending weights by minimising validation MSE on the simplex."""
        from scipy.optimize import minimize

        predictions = self._collect_predictions(X_val)
        names = list(predictions.keys())

        def objective(weights):
            total = float(np.sum(weights))
            if total <= 0:
                # Degenerate all-zero weight vector: worst possible score.
                return float('inf')
            blended = sum(w * predictions[name]
                          for name, w in zip(names, weights)) / total
            return mean_squared_error(y_val, blended)

        # Constrain weights to sum to 1, each within [0, 1].
        constraints = ({'type': 'eq', 'fun': lambda w: np.sum(w) - 1},)
        bounds = [(0, 1)] * len(names)

        result = minimize(objective, [1 / len(names)] * len(names),
                          method='SLSQP', bounds=bounds, constraints=constraints)

        # Store the optimised weights back on the ensemble.
        for name, w in zip(names, result.x):
            self.weights[name] = w

class ExternalDataIntegrator:
    """Fetches external signals (weather, holidays) and joins them onto demand data."""

    def __init__(self):
        # Placeholders for cached external datasets (currently unused).
        self.weather_data = None
        self.holiday_data = None
        self.market_data = None

    def get_weather_data(self, start_date, end_date, location='Beijing'):
        """Return one daily weather record per date in [start_date, end_date].

        A real weather API could be plugged in here; for now the values are
        simulated with random draws.
        """
        dates = pd.date_range(start=start_date, end=end_date)
        n = len(dates)
        return pd.DataFrame({
            'date': dates,
            'temperature': np.random.normal(20, 10, n),
            'humidity': np.random.normal(60, 20, n),
            'precipitation': np.random.exponential(2, n),
        })

    def get_holiday_data(self, start_date, end_date, country='CN'):
        """Return one daily holiday record per date in [start_date, end_date].

        Placeholder for a real holiday API; the flags are randomly simulated.
        """
        dates = pd.date_range(start=start_date, end=end_date)
        n = len(dates)
        return pd.DataFrame({
            'date': dates,
            'is_holiday': np.random.choice([0, 1], n, p=[0.9, 0.1]),
            'holiday_type': np.random.choice(
                ['none', 'national', 'weekend'], n, p=[0.8, 0.1, 0.1]),
        })

    def enrich_demand_data(self, demand_df, start_date, end_date):
        """Left-join weather and holiday features onto *demand_df* by date."""
        weather = self.get_weather_data(start_date, end_date)
        holidays = self.get_holiday_data(start_date, end_date)

        enriched = demand_df.merge(weather, on='date', how='left')
        return enriched.merge(holidays, on='date', how='left')

class AdvancedDemandForecaster:
    """High-level demand-forecasting facade.

    Wires together preprocessing, deep models (LSTM / Transformer),
    tree ensembles and external-data enrichment.
    """

    def __init__(self):
        self.preprocessor = DataPreprocessor()
        self.lstm_predictor = LSTMPredictor()
        self.transformer_predictor = TransformerPredictor()
        self.ensemble = EnsembleForecaster()
        self.external_data = ExternalDataIntegrator()

    def prepare_training_data(self, df, product_id, date_col='date', target_col='quantity'):
        """Aggregate one product's demand to daily totals and add external features.

        Raises:
            ValueError: if the product has fewer than 60 rows of history.
        """
        product_data = df[df['product_id'] == product_id].copy()

        if len(product_data) < 60:
            raise ValueError(f"Insufficient data for product {product_id}")

        # Collapse to one row per day.
        daily_data = product_data.groupby(date_col)[target_col].sum().reset_index()
        daily_data.columns = ['date', 'quantity']

        # Join weather / holiday features for the covered date range.
        start_date = daily_data['date'].min()
        end_date = daily_data['date'].max()
        return self.external_data.enrich_demand_data(daily_data, start_date, end_date)

    def train_models(self, df, product_id, test_size=0.2):
        """Train deep and tree models for one product and blend them.

        Returns a dict with the held-out test arrays, the fitted target scaler
        and the (approximate) dates of the test rows.
        """
        data = self.prepare_training_data(df, product_id)
        features = self.preprocessor.create_features(data)

        # Scale the target and slice it into 30-step input / 7-step output windows.
        scaler = MinMaxScaler()
        scaled_quantity = scaler.fit_transform(features[['quantity']])

        X, y = [], []
        for i in range(len(scaled_quantity) - 30 - 7 + 1):
            X.append(scaled_quantity[i:i + 30])
            y.append(scaled_quantity[i + 30:i + 30 + 7].flatten())

        X = np.array(X)
        y = np.array(y)

        # Chronological train/test split (no shuffling for time series).
        split_idx = int(len(X) * (1 - test_size))
        X_train, X_test = X[:split_idx], X[split_idx:]
        y_train, y_test = y[:split_idx], y[split_idx:]

        lstm_history = self.lstm_predictor.train(X_train, y_train, X_test, y_test)
        transformer_history = self.transformer_predictor.train(X_train, y_train, X_test, y_test)

        # Tree models predict the horizon MEAN from the flattened window.
        # Bug fix: they were previously fitted on the FULL X (train + test),
        # which both leaked test data and crashed on the row-count mismatch
        # against y_train.
        X_train_flat = X_train.reshape(X_train.shape[0], -1)
        y_train_mean = y_train.mean(axis=1)

        rf_model = RandomForestRegressor(n_estimators=100, random_state=42)
        gb_model = GradientBoostingRegressor(n_estimators=100, random_state=42)
        xgb_model = xgb.XGBRegressor(n_estimators=100, random_state=42)

        rf_model.fit(X_train_flat, y_train_mean)
        gb_model.fit(X_train_flat, y_train_mean)
        xgb_model.fit(X_train_flat, y_train_mean)

        self.ensemble.add_model('LSTM', self.lstm_predictor, weight=0.3)
        self.ensemble.add_model('Transformer', self.transformer_predictor, weight=0.25)
        self.ensemble.add_model('RandomForest', rf_model, weight=0.15)
        self.ensemble.add_model('GradientBoosting', gb_model, weight=0.15)
        self.ensemble.add_model('XGBoost', xgb_model, weight=0.15)

        # NOTE(review): deep models output 7 values per window while the tree
        # models output a single horizon mean, so blending mixes shapes —
        # confirm the intended ensemble target before relying on this.
        self.ensemble.optimize_weights(X_test, y_test.mean(axis=1))

        return {
            'X_test': X_test,
            'y_test': y_test,
            'scaler': scaler,
            # NOTE(review): dates come from the raw daily data, whose row count
            # differs from `features` after dropna — alignment with the test
            # windows is approximate; verify before plotting against actuals.
            'test_dates': data['date'].iloc[-len(X_test):]
        }

    def predict(self, df, product_id, horizon=7):
        """Forecast the next `horizon` days for one product via the ensemble."""
        data = self.prepare_training_data(df, product_id)

        # Last full input window (30 days, matching the training window size).
        recent_data = data.tail(30)

        # NOTE(review): a fresh scaler is fitted on only the last 30 days, so
        # its scale can differ from the one used during training — consider
        # reusing the scaler returned by train_models.
        scaler = MinMaxScaler()
        scaled_data = scaler.fit_transform(recent_data[['quantity']])
        X_input = scaled_data.reshape(1, 30, 1)

        ensemble_pred = self.ensemble.predict_with_weights(X_input)

        # Map the blended prediction back to original units.
        return scaler.inverse_transform(ensemble_pred.reshape(-1, 1)).flatten()

    def get_model_performance(self, X_test, y_test, scaler):
        """Return MAE / RMSE / MAPE per registered model, in original units.

        All models are scored against the horizon-mean target so deep
        (multi-step) and tree (single-value) predictions are comparable.
        """
        performance = {}

        for name, model in self.ensemble.models.items():
            # Bug fix: distinguish deep wrappers by their custom `train`
            # method; sklearn estimators also define predict(), so the old
            # `hasattr(model, 'predict')` branch always took the 3-D path.
            if hasattr(model, 'train'):
                pred = model.predict(X_test)
                # Deep models emit one value per horizon step; reduce to the
                # horizon mean to match the target below (the old code
                # compared mismatched lengths and crashed).
                if pred.ndim > 1:
                    pred = pred.mean(axis=1)
            else:
                pred = model.predict(X_test.reshape(X_test.shape[0], -1))

            # Map both series back to original units.
            pred = scaler.inverse_transform(pred.reshape(-1, 1)).flatten()
            true = scaler.inverse_transform(
                y_test.mean(axis=1).reshape(-1, 1)
            ).flatten()

            mae = mean_absolute_error(true, pred)
            rmse = np.sqrt(mean_squared_error(true, pred))
            # Guard against division by zero when true demand is 0.
            denom = np.clip(np.abs(true), 1e-8, None)
            mape = np.mean(np.abs((true - pred) / denom)) * 100

            performance[name] = {
                'MAE': mae,
                'RMSE': rmse,
                'MAPE': mape
            }

        return performance