# examples/ml_price_prediction.py
"""
机器学习股价预测示例
使用多种算法预测股票价格
"""

import sys
import numpy as np
import pandas as pd
from pathlib import Path
from datetime import datetime, timedelta
from typing import Dict, List, Tuple
import warnings
warnings.filterwarnings('ignore')

# Machine learning libraries
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.svm import SVR
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
import xgboost as xgb
import lightgbm as lgb

# Deep learning libraries (optional; script degrades gracefully without them)
try:
    import tensorflow as tf
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import LSTM, Dense, Dropout
    KERAS_AVAILABLE = True
except ImportError:
    KERAS_AVAILABLE = False
    print("⚠️ TensorFlow未安装，LSTM模型将被跳过")

# Add the project root to sys.path so local packages import correctly
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))

from src.services.market_data_service import MarketDataService

class FeatureEngineer:
    """特征工程类"""
    
    @staticmethod
    def create_technical_features(df: pd.DataFrame) -> pd.DataFrame:
        """创建技术指标特征"""
        data = df.copy()
        
        # 价格特征
        data['price_change'] = data['close'].pct_change()
        data['high_low_ratio'] = data['high'] / data['low']
        data['close_open_ratio'] = data['close'] / data['open']
        
        # 移动平均线
        for window in [5, 10, 20, 50]:
            data[f'ma_{window}'] = data['close'].rolling(window).mean()
            data[f'ma_{window}_ratio'] = data['close'] / data[f'ma_{window}']
        
        # 波动率
        for window in [5, 10, 20]:
            data[f'volatility_{window}'] = data['price_change'].rolling(window).std()
        
        # RSI
        delta = data['close'].diff()
        gain = delta.where(delta > 0, 0)
        loss = -delta.where(delta < 0, 0)
        avg_gain = gain.rolling(14).mean()
        avg_loss = loss.rolling(14).mean()
        rs = avg_gain / avg_loss
        data['rsi'] = 100 - (100 / (1 + rs))
        
        # MACD
        ema12 = data['close'].ewm(span=12).mean()
        ema26 = data['close'].ewm(span=26).mean()
        data['macd'] = ema12 - ema26
        data['macd_signal'] = data['macd'].ewm(span=9).mean()
        data['macd_histogram'] = data['macd'] - data['macd_signal']
        
        # 布林带
        bb_period = 20
        bb_middle = data['close'].rolling(bb_period).mean()
        bb_std = data['close'].rolling(bb_period).std()
        data['bb_upper'] = bb_middle + (bb_std * 2)
        data['bb_lower'] = bb_middle - (bb_std * 2)
        data['bb_width'] = data['bb_upper'] - data['bb_lower']
        data['bb_position'] = (data['close'] - data['bb_lower']) / data['bb_width']
        
        # 成交量特征
        data['volume_sma'] = data['volume'].rolling(20).mean()
        data['volume_ratio'] = data['volume'] / data['volume_sma']
        
        # 价格位置特征
        for window in [5, 10, 20]:
            data[f'high_{window}'] = data['high'].rolling(window).max()
            data[f'low_{window}'] = data['low'].rolling(window).min()
            data[f'price_position_{window}'] = (data['close'] - data[f'low_{window}']) / \
                                             (data[f'high_{window}'] - data[f'low_{window}'])
        
        return data
    
    @staticmethod
    def create_lag_features(df: pd.DataFrame, target_col: str = 'close', lags: List[int] = None) -> pd.DataFrame:
        """创建滞后特征"""
        if lags is None:
            lags = [1, 2, 3, 5, 10]
        
        data = df.copy()
        
        for lag in lags:
            data[f'{target_col}_lag_{lag}'] = data[target_col].shift(lag)
            data[f'{target_col}_change_lag_{lag}'] = data[target_col].pct_change(lag)
        
        return data
    
    @staticmethod
    def create_rolling_features(df: pd.DataFrame, target_col: str = 'close') -> pd.DataFrame:
        """创建滚动窗口特征"""
        data = df.copy()
        
        # 滚动统计特征
        for window in [5, 10, 20]:
            data[f'{target_col}_rolling_mean_{window}'] = data[target_col].rolling(window).mean()
            data[f'{target_col}_rolling_std_{window}'] = data[target_col].rolling(window).std()
            data[f'{target_col}_rolling_min_{window}'] = data[target_col].rolling(window).min()
            data[f'{target_col}_rolling_max_{window}'] = data[target_col].rolling(window).max()
            data[f'{target_col}_rolling_skew_{window}'] = data[target_col].rolling(window).skew()
            data[f'{target_col}_rolling_kurt_{window}'] = data[target_col].rolling(window).kurt()
        
        return data

class MLPredictor:
    """Machine-learning price predictor.

    Trains a suite of regression models (plus an optional LSTM) on
    feature-engineered OHLCV data and keeps the fitted models and scalers
    for later inspection.
    """

    def __init__(self):
        # model name -> fitted estimator
        self.models = {}
        # 'standard' -> StandardScaler for tabular models,
        # 'lstm'     -> MinMaxScaler for the sequence model
        self.scalers = {}
        # Feature column names from the most recent prepare_data() call
        self.feature_names = []

    def prepare_data(self, df: pd.DataFrame, target_col: str = 'close',
                    prediction_days: int = 1) -> Tuple[np.ndarray, np.ndarray, List[str]]:
        """Build the supervised-learning matrices X and y.

        Args:
            df: OHLCV history (needs open/high/low/close/volume columns).
            target_col: Column to predict.
            prediction_days: Forecast horizon; the target is ``target_col``
                shifted ``prediction_days`` rows into the future.

        Returns:
            (X, y, feature_cols); rows containing NaN are dropped.

        Raises:
            ValueError: If no rows remain after NaN removal.
        """
        # Feature engineering
        feature_engineer = FeatureEngineer()
        data = feature_engineer.create_technical_features(df)
        data = feature_engineer.create_lag_features(data, target_col)
        data = feature_engineer.create_rolling_features(data, target_col)

        # Target variable: the price N days in the future
        data[f'target_{prediction_days}d'] = data[target_col].shift(-prediction_days)

        # Feature columns: exclude EVERY target column and the raw target
        # itself.  (The previous hard-coded 'target_1d/3d/5d/10d' list would
        # silently leak the freshly created target column into the features
        # for any other prediction_days value.)
        feature_cols = [
            col for col in data.columns
            if not col.startswith('target_') and col != target_col
        ]

        # Rolling/lag features create leading NaNs and the shifted target
        # creates trailing NaNs; drop them all.
        data = data.dropna()

        if data.empty:
            raise ValueError("数据清理后为空，请检查输入数据")

        X = data[feature_cols].values
        y = data[f'target_{prediction_days}d'].values

        self.feature_names = feature_cols

        return X, y, feature_cols

    def train_traditional_models(self, X_train: np.ndarray, y_train: np.ndarray,
                               X_test: np.ndarray, y_test: np.ndarray) -> Dict:
        """Train and evaluate the classic (non-deep) regression models.

        Scale-sensitive models (SVR and the linear family) are fed
        standardized features; tree ensembles use the raw features.

        Returns:
            Mapping of model name -> dict holding the fitted model, the
            test-set predictions and MSE/MAE/R²/RMSE scores.  Models that
            fail to train are reported and skipped.
        """

        # Standardize once; keep the fitted scaler for later inference
        scaler = StandardScaler()
        X_train_scaled = scaler.fit_transform(X_train)
        X_test_scaled = scaler.transform(X_test)
        self.scalers['standard'] = scaler

        models_config = {
            'linear_regression': LinearRegression(),
            'ridge': Ridge(alpha=1.0),
            'lasso': Lasso(alpha=0.1),
            'random_forest': RandomForestRegressor(
                n_estimators=100,
                max_depth=10,
                random_state=42,
                n_jobs=-1
            ),
            'gradient_boosting': GradientBoostingRegressor(
                n_estimators=100,
                max_depth=6,
                random_state=42
            ),
            'xgboost': xgb.XGBRegressor(
                n_estimators=100,
                max_depth=6,
                random_state=42
            ),
            'lightgbm': lgb.LGBMRegressor(
                n_estimators=100,
                max_depth=6,
                random_state=42,
                verbose=-1
            ),
            'svr': SVR(kernel='rbf', C=100, gamma=0.1)
        }

        results = {}

        print("🤖 训练传统机器学习模型...")

        for model_name, model in models_config.items():
            try:
                print(f"   训练 {model_name}...")

                # Scale-sensitive models get the standardized inputs
                if model_name in ['svr', 'linear_regression', 'ridge', 'lasso']:
                    X_train_model = X_train_scaled
                    X_test_model = X_test_scaled
                else:
                    X_train_model = X_train
                    X_test_model = X_test

                # Fit
                model.fit(X_train_model, y_train)

                # Predict on the held-out set
                y_pred = model.predict(X_test_model)

                # Evaluate
                mse = mean_squared_error(y_test, y_pred)
                mae = mean_absolute_error(y_test, y_pred)
                r2 = r2_score(y_test, y_pred)

                results[model_name] = {
                    'model': model,
                    'predictions': y_pred,
                    'mse': mse,
                    'mae': mae,
                    'r2': r2,
                    'rmse': np.sqrt(mse)
                }

                self.models[model_name] = model

                print(f"     ✅ {model_name}: R² = {r2:.3f}, RMSE = {np.sqrt(mse):.3f}")

            except Exception as e:
                # Best-effort: report the failure and continue with the rest
                print(f"     ❌ {model_name} 训练失败: {e}")

        return results

    def train_lstm_model(self, data: pd.DataFrame, target_col: str = 'close',
                        sequence_length: int = 60) -> Dict:
        """Train an LSTM on min-max-scaled sequences of *target_col*.

        Uses its own chronological 80/20 split over sliding windows of
        ``sequence_length`` observations, so its test set generally differs
        in length from the tabular models' test set.

        Returns:
            {'lstm': {...}} with the model, predictions, actuals and scores,
            or {} when TensorFlow is unavailable or training fails.
        """

        if not KERAS_AVAILABLE:
            print("⚠️ TensorFlow未安装，跳过LSTM模型")
            return {}

        print("🧠 训练LSTM深度学习模型...")

        try:
            # Scale the target to [0, 1]; keep the scaler to invert later
            scaler = MinMaxScaler()
            scaled_data = scaler.fit_transform(data[[target_col]])
            self.scalers['lstm'] = scaler

            # Sliding windows: each sample is the previous sequence_length values
            X, y = [], []
            for i in range(sequence_length, len(scaled_data)):
                X.append(scaled_data[i-sequence_length:i, 0])
                y.append(scaled_data[i, 0])

            X, y = np.array(X), np.array(y)
            # Keras LSTM expects (samples, timesteps, features)
            X = X.reshape((X.shape[0], X.shape[1], 1))

            # Chronological 80/20 split (no shuffling: time series)
            split_index = int(0.8 * len(X))
            X_train, X_test = X[:split_index], X[split_index:]
            y_train, y_test = y[:split_index], y[split_index:]

            # Stacked LSTM with dropout regularization
            model = Sequential([
                LSTM(50, return_sequences=True, input_shape=(sequence_length, 1)),
                Dropout(0.2),
                LSTM(50, return_sequences=True),
                Dropout(0.2),
                LSTM(50),
                Dropout(0.2),
                Dense(1)
            ])

            model.compile(optimizer='adam', loss='mse')

            # Fit silently; validate on the chronological test split
            history = model.fit(
                X_train, y_train,
                epochs=50,
                batch_size=32,
                validation_data=(X_test, y_test),
                verbose=0
            )

            # Predict
            y_pred = model.predict(X_test)

            # Undo the min-max scaling so scores are in price units
            y_test_actual = scaler.inverse_transform(y_test.reshape(-1, 1)).flatten()
            y_pred_actual = scaler.inverse_transform(y_pred).flatten()

            # Evaluate
            mse = mean_squared_error(y_test_actual, y_pred_actual)
            mae = mean_absolute_error(y_test_actual, y_pred_actual)
            r2 = r2_score(y_test_actual, y_pred_actual)

            self.models['lstm'] = model

            print(f"     ✅ LSTM: R² = {r2:.3f}, RMSE = {np.sqrt(mse):.3f}")

            return {
                'lstm': {
                    'model': model,
                    'predictions': y_pred_actual,
                    'actual': y_test_actual,
                    'mse': mse,
                    'mae': mae,
                    'r2': r2,
                    'rmse': np.sqrt(mse),
                    'history': history
                }
            }

        except Exception as e:
            print(f"     ❌ LSTM训练失败: {e}")
            return {}

    def hyperparameter_tuning(self, X_train: np.ndarray, y_train: np.ndarray) -> Dict:
        """Grid-search Random Forest and XGBoost hyperparameters.

        NOTE(review): cv=5 uses ordinary (unshuffled) K-fold, which trains
        on future folds to score past folds; for strict time-series
        validation consider sklearn's TimeSeriesSplit — left unchanged here
        to preserve existing results.

        Returns:
            {'random_forest_tuned': ..., 'xgboost_tuned': ...} best estimators.
        """
        print("🔧 进行超参数调优...")

        # Random Forest search space
        rf_params = {
            'n_estimators': [50, 100, 200],
            'max_depth': [5, 10, 15, None],
            'min_samples_split': [2, 5, 10],
            'min_samples_leaf': [1, 2, 4]
        }

        rf_grid = GridSearchCV(
            RandomForestRegressor(random_state=42),
            rf_params,
            cv=5,
            scoring='r2',
            n_jobs=-1
        )

        rf_grid.fit(X_train, y_train)

        # XGBoost search space
        xgb_params = {
            'n_estimators': [50, 100, 200],
            'max_depth': [3, 6, 9],
            'learning_rate': [0.01, 0.1, 0.2],
            'subsample': [0.8, 0.9, 1.0]
        }

        xgb_grid = GridSearchCV(
            xgb.XGBRegressor(random_state=42),
            xgb_params,
            cv=5,
            scoring='r2',
            n_jobs=-1
        )

        xgb_grid.fit(X_train, y_train)

        tuned_models = {
            'random_forest_tuned': rf_grid.best_estimator_,
            'xgboost_tuned': xgb_grid.best_estimator_
        }

        print(f"     ✅ Random Forest最佳参数: {rf_grid.best_params_}")
        print(f"     ✅ XGBoost最佳参数: {xgb_grid.best_params_}")

        return tuned_models

    def feature_importance_analysis(self, model_name: str) -> pd.DataFrame:
        """Return a DataFrame of features sorted by importance (descending).

        Works with tree models exposing ``feature_importances_`` and linear
        models exposing ``coef_`` (absolute value used).  Returns an empty
        frame if the model is unknown or unsupported.
        """
        if model_name not in self.models:
            print(f"❌ 模型 {model_name} 不存在")
            return pd.DataFrame()

        model = self.models[model_name]

        # Extract importances depending on the estimator family
        if hasattr(model, 'feature_importances_'):
            importances = model.feature_importances_
        elif hasattr(model, 'coef_'):
            importances = np.abs(model.coef_)
        else:
            print(f"❌ 模型 {model_name} 不支持特征重要性分析")
            return pd.DataFrame()

        feature_importance = pd.DataFrame({
            'feature': self.feature_names,
            'importance': importances
        }).sort_values('importance', ascending=False)

        return feature_importance

def ensemble_predictions(predictions_dict: Dict) -> np.ndarray:
    """Combine per-model predictions into one weighted-average forecast.

    Weights are each model's R² clipped at zero; if every weight is zero a
    plain mean is used.  Prediction vectors whose length differs from the
    most common length are skipped — e.g. the LSTM uses its own train/test
    split, and stacking ragged arrays would raise a ValueError in NumPy.

    Args:
        predictions_dict: Mapping of model name -> result dict containing at
            least 'predictions' (array-like) and optionally 'r2'.

    Returns:
        The ensemble prediction array, or an empty array when there is
        nothing to combine.
    """
    candidates = []  # (prediction array, non-negative weight)

    for result in predictions_dict.values():
        if 'predictions' in result:
            preds = np.asarray(result['predictions'])
            # Negative R² (worse than predicting the mean) gets zero weight
            candidates.append((preds, max(0, result.get('r2', 0))))

    if not candidates:
        return np.array([])

    # Keep only vectors of the modal length so they stack rectangularly
    lengths = [len(p) for p, _ in candidates]
    modal_len = max(set(lengths), key=lengths.count)
    kept = [(p, w) for p, w in candidates if len(p) == modal_len]

    predictions = np.array([p for p, _ in kept])
    weights = np.array([w for _, w in kept])

    # Weighted average when any model earned positive weight, else mean
    if weights.sum() > 0:
        weights = weights / weights.sum()
        ensemble_pred = np.average(predictions, axis=0, weights=weights)
    else:
        ensemble_pred = np.mean(predictions, axis=0)

    return ensemble_pred

def main():
    """Demo driver: fetch AAPL history, train every model, report results."""

    print("🤖 机器学习股价预测系统")
    print("=" * 50)

    # Services
    market_service = MarketDataService()
    predictor = MLPredictor()

    # Fetch roughly 1000 calendar days of daily bars
    symbol = 'AAPL'
    print(f"📊 获取 {symbol} 历史数据...")

    start_date = datetime.now() - timedelta(days=1000)
    end_date = datetime.now()

    data = market_service.get_stock_price(
        symbol,
        start_date=start_date,
        end_date=end_date,
        interval='1d'
    )

    # Guard against both a missing result and an empty frame
    if data is None or data.empty:
        print("❌ 未能获取数据")
        return

    print(f"✅ 获取到 {len(data)} 条数据记录")

    try:
        # Build the feature matrix and target vector
        print("\n🔧 准备训练数据...")
        X, y, feature_names = predictor.prepare_data(data, target_col='close', prediction_days=1)

        # Chronological split: shuffle=False prevents look-ahead leakage
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=0.2, random_state=42, shuffle=False
        )

        print(f"   训练集大小: {X_train.shape[0]}")
        print(f"   测试集大小: {X_test.shape[0]}")
        print(f"   特征数量: {X_train.shape[1]}")

        # Classic machine-learning models
        ml_results = predictor.train_traditional_models(X_train, y_train, X_test, y_test)

        # LSTM (skipped automatically when TensorFlow is missing)
        lstm_results = predictor.train_lstm_model(data, target_col='close')

        # Merge everything for comparison
        all_results = {**ml_results, **lstm_results}

        # If every model failed there is nothing to compare (max() below
        # would raise on an empty dict)
        if not all_results:
            print("❌ 没有模型训练成功")
            return

        # Performance comparison table
        print(f"\n📊 模型性能对比:")
        print("-" * 60)
        print(f"{'模型名称':<20} {'R²':<8} {'RMSE':<10} {'MAE':<10}")
        print("-" * 60)

        for model_name, result in all_results.items():
            r2 = result.get('r2', 0)
            rmse = result.get('rmse', 0)
            mae = result.get('mae', 0)
            print(f"{model_name:<20} {r2:<8.3f} {rmse:<10.3f} {mae:<10.3f}")

        # Best model by R²
        best_model = max(all_results.items(), key=lambda x: x[1].get('r2', 0))
        print(f"\n🏆 最佳模型: {best_model[0]} (R² = {best_model[1]['r2']:.3f})")

        # Feature importance from the tree ensemble
        if 'random_forest' in ml_results:
            print(f"\n🔍 Random Forest 特征重要性分析:")
            importance_df = predictor.feature_importance_analysis('random_forest')
            print(importance_df.head(10))

        # Ensemble prediction
        print(f"\n🔮 集成预测结果:")
        ensemble_pred = ensemble_predictions(all_results)
        # Only score the ensemble when it lines up with y_test; the LSTM
        # uses its own split, so its length can differ from the tabular
        # models' test set
        if len(ensemble_pred) > 0 and len(ensemble_pred) == len(y_test):
            ensemble_mse = mean_squared_error(y_test, ensemble_pred)
            ensemble_r2 = r2_score(y_test, ensemble_pred)
            print(f"   集成模型 R²: {ensemble_r2:.3f}")
            print(f"   集成模型 RMSE: {np.sqrt(ensemble_mse):.3f}")

        # Current price snapshot
        print(f"\n🔮 未来价格预测:")
        latest_data = data.tail(1)
        current_price = latest_data['close'].iloc[0]
        print(f"   当前价格: ${current_price:.2f}")

        # Forecasting with the best model would need the same feature
        # pipeline applied to the newest bar — not implemented in this demo
        if best_model[0] in predictor.models:
            print(f"   {best_model[0]} 预测: [需要最新特征数据]")

        # Optional hyperparameter tuning
        print(f"\n🔧 超参数调优...")
        tuned_models = predictor.hyperparameter_tuning(X_train, y_train)

        for name, model in tuned_models.items():
            y_pred_tuned = model.predict(X_test)
            r2_tuned = r2_score(y_test, y_pred_tuned)
            print(f"   {name} 调优后 R²: {r2_tuned:.3f}")

    except Exception as e:
        # Demo script: surface the full traceback rather than hiding it
        print(f"❌ 预测过程中发生错误: {e}")
        import traceback
        traceback.print_exc()

if __name__ == "__main__":
    main()