import logging
import os
import pickle
from datetime import datetime
from typing import List, Dict, Any, Optional, Tuple, Callable, Union, Type

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
from sklearn.decomposition import PCA
from sklearn.ensemble import (
    GradientBoostingClassifier,
    GradientBoostingRegressor,
    RandomForestClassifier,
    RandomForestRegressor,
)
from sklearn.feature_selection import (
    SelectKBest,
    f_classif,
    f_regression,
    mutual_info_classif,
    mutual_info_regression,
)
from sklearn.linear_model import LogisticRegression, LinearRegression, Ridge, Lasso
from sklearn.metrics import (
    accuracy_score,
    f1_score,
    mean_squared_error,
    precision_score,
    r2_score,
    recall_score,
)
from sklearn.model_selection import train_test_split, TimeSeriesSplit, cross_val_score
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.svm import SVC, SVR

# Optional PyTorch import: TORCH_AVAILABLE gates any torch-backed code paths
# so the rest of the module still works without a torch installation.
try:
    import torch
    import torch.nn as nn
    import torch.optim as optim
    from torch.utils.data import Dataset, DataLoader
    TORCH_AVAILABLE = True
except ImportError:
    TORCH_AVAILABLE = False
    logging.warning("PyTorch未安装，PyTorch相关功能将不可用")


class QMLFeatureExtractor:
    """
    Feature extractor that builds machine-learning features from market data.

    Feature families:
    1. Technical indicators (moving averages, RSI, MACD, ...)
    2. Price patterns (trend, volatility, ...)
    3. Calendar features (day of week, month, ...)
    4. Lagged features (price/volume of the previous N bars)
    """

    def __init__(self):
        """Initialize the extractor and register the built-in features."""
        # name -> callable(data, **params); the callable may return a
        # Series, a DataFrame or a dict of column-name -> values.
        self.feature_functions: Dict[str, Callable] = {}
        self._register_default_features()

    def _register_default_features(self):
        """Register the default feature-computation functions."""
        # Price features
        self.register_feature('returns', self._calculate_returns)
        self.register_feature('log_returns', self._calculate_log_returns)

        # Technical indicators
        self.register_feature('sma', self._calculate_sma)
        self.register_feature('ema', self._calculate_ema)
        self.register_feature('rsi', self._calculate_rsi)
        self.register_feature('macd', self._calculate_macd)
        self.register_feature('bollinger', self._calculate_bollinger)

        # Volatility indicators
        self.register_feature('volatility', self._calculate_volatility)
        self.register_feature('atr', self._calculate_atr)

        # Trend indicators
        self.register_feature('adx', self._calculate_adx)

        # Volume indicators
        self.register_feature('obv', self._calculate_obv)
        self.register_feature('vwap', self._calculate_vwap)

        # Calendar features
        self.register_feature('time_features', self._extract_time_features)

        # Lagged features
        self.register_feature('lagged_features', self._create_lagged_features)

    def register_feature(self, name: str, function: Callable):
        """
        Register a custom feature-computation function.

        Args:
            name: feature name (the key used in ``features_config``).
            function: callable ``f(data, **params)`` returning a Series,
                DataFrame or dict of columns.
        """
        self.feature_functions[name] = function

    def extract_features(self, data: pd.DataFrame, features_config: Dict[str, Any]) -> pd.DataFrame:
        """
        Extract features from market data.

        Args:
            data: input data; expected to contain OHLCV columns.
            features_config: mapping of feature name -> parameter dict
                (``None`` means "use the feature's defaults").

        Returns:
            pd.DataFrame: one column per produced feature, indexed like ``data``.
        """
        features_df = pd.DataFrame(index=data.index)

        # Warn (but do not fail) on missing OHLCV columns; each feature
        # function validates the specific columns it needs.
        required_columns = ['open', 'high', 'low', 'close', 'volume']
        missing_columns = [col for col in required_columns if col not in data.columns]
        if missing_columns:
            logging.warning(f"数据缺少必要列: {missing_columns}")

        for feature_name, params in features_config.items():
            if feature_name not in self.feature_functions:
                logging.warning(f"未知特征: {feature_name}")
                continue
            try:
                feature_data = self.feature_functions[feature_name](data, **(params or {}))

                # Normalize the three supported return shapes into columns.
                if isinstance(feature_data, pd.DataFrame):
                    for col in feature_data.columns:
                        features_df[col] = feature_data[col]
                elif isinstance(feature_data, pd.Series):
                    features_df[feature_name] = feature_data
                elif isinstance(feature_data, dict):
                    for key, value in feature_data.items():
                        features_df[key] = value
            except Exception as e:
                # One failing feature must not abort the whole extraction.
                logging.error(f"计算特征 {feature_name} 时出错: {str(e)}")

        return features_df

    # ---- Price features --------------------------------------------------

    def _calculate_returns(self, data: pd.DataFrame, period: int = 1, column: str = 'close') -> pd.Series:
        """Simple percentage return over ``period`` bars."""
        if column not in data.columns:
            raise ValueError(f"列 {column} 不存在")
        return data[column].pct_change(period)

    def _calculate_log_returns(self, data: pd.DataFrame, period: int = 1, column: str = 'close') -> pd.Series:
        """Log return over ``period`` bars."""
        if column not in data.columns:
            raise ValueError(f"列 {column} 不存在")
        return np.log(data[column] / data[column].shift(period))

    # ---- Technical indicators --------------------------------------------

    def _calculate_sma(self, data: pd.DataFrame, periods: Optional[List[int]] = None,
                       column: str = 'close') -> pd.DataFrame:
        """Simple moving averages, one ``sma_<n>`` column per window.

        ``periods`` defaults to (5, 10, 20, 50); using ``None`` as the default
        avoids the shared-mutable-default pitfall.
        """
        if column not in data.columns:
            raise ValueError(f"列 {column} 不存在")
        if periods is None:
            periods = (5, 10, 20, 50)

        result = pd.DataFrame(index=data.index)
        for period in periods:
            result[f'sma_{period}'] = data[column].rolling(window=period).mean()
        return result

    def _calculate_ema(self, data: pd.DataFrame, periods: Optional[List[int]] = None,
                       column: str = 'close') -> pd.DataFrame:
        """Exponential moving averages, one ``ema_<n>`` column per span."""
        if column not in data.columns:
            raise ValueError(f"列 {column} 不存在")
        if periods is None:
            periods = (5, 10, 20, 50)

        result = pd.DataFrame(index=data.index)
        for period in periods:
            result[f'ema_{period}'] = data[column].ewm(span=period, adjust=False).mean()
        return result

    def _calculate_rsi(self, data: pd.DataFrame, period: int = 14, column: str = 'close') -> pd.Series:
        """
        Relative Strength Index.

        NOTE: uses plain rolling means (Cutler's RSI) rather than Wilder's
        exponential smoothing, so values differ slightly from most charting
        packages.
        """
        if column not in data.columns:
            raise ValueError(f"列 {column} 不存在")

        delta = data[column].diff()
        gain = delta.where(delta > 0, 0)
        loss = -delta.where(delta < 0, 0)

        avg_gain = gain.rolling(window=period).mean()
        avg_loss = loss.rolling(window=period).mean()

        # avg_loss == 0 yields rs = inf and rsi -> 100, which is the
        # conventional saturation value for an all-gain window.
        rs = avg_gain / avg_loss
        rsi = 100 - (100 / (1 + rs))

        return pd.Series(rsi, name='rsi')

    def _calculate_macd(self, data: pd.DataFrame, fast_period: int = 12, slow_period: int = 26,
                       signal_period: int = 9, column: str = 'close') -> pd.DataFrame:
        """MACD line, signal line and histogram (fast EMA minus slow EMA)."""
        if column not in data.columns:
            raise ValueError(f"列 {column} 不存在")

        ema_fast = data[column].ewm(span=fast_period, adjust=False).mean()
        ema_slow = data[column].ewm(span=slow_period, adjust=False).mean()
        macd_line = ema_fast - ema_slow
        signal_line = macd_line.ewm(span=signal_period, adjust=False).mean()

        result = pd.DataFrame(index=data.index)
        result['macd_line'] = macd_line
        result['macd_signal'] = signal_line
        result['macd_histogram'] = macd_line - signal_line

        return result

    def _calculate_bollinger(self, data: pd.DataFrame, period: int = 20, std_dev: float = 2.0,
                           column: str = 'close') -> pd.DataFrame:
        """Bollinger bands plus bandwidth and %B position."""
        if column not in data.columns:
            raise ValueError(f"列 {column} 不存在")

        middle = data[column].rolling(window=period).mean()
        std = data[column].rolling(window=period).std()

        upper = middle + std_dev * std
        lower = middle - std_dev * std

        result = pd.DataFrame(index=data.index)
        result['bb_middle'] = middle
        result['bb_upper'] = upper
        result['bb_lower'] = lower
        result['bb_width'] = (upper - lower) / middle
        result['bb_percent'] = (data[column] - lower) / (upper - lower)

        return result

    # ---- Volatility indicators -------------------------------------------

    def _calculate_volatility(self, data: pd.DataFrame, period: int = 20, column: str = 'close') -> pd.Series:
        """Rolling std of log returns, annualized with sqrt(252)."""
        if column not in data.columns:
            raise ValueError(f"列 {column} 不存在")

        log_returns = np.log(data[column] / data[column].shift(1))
        return log_returns.rolling(window=period).std() * np.sqrt(252)

    def _calculate_atr(self, data: pd.DataFrame, period: int = 14) -> pd.Series:
        """Average True Range over ``period`` bars."""
        if any(col not in data.columns for col in ['high', 'low', 'close']):
            raise ValueError("数据必须包含high、low和close列")

        high = data['high']
        low = data['low']
        close = data['close']

        # True range: max of (H-L, |H-prevC|, |L-prevC|); NaNs in the
        # shifted terms are ignored by max(axis=1).
        tr = pd.DataFrame({
            'hl': high - low,
            'hc': (high - close.shift(1)).abs(),
            'lc': (low - close.shift(1)).abs(),
        }).max(axis=1)
        atr = tr.rolling(window=period).mean()

        return pd.Series(atr, name='atr')

    def _calculate_adx(self, data: pd.DataFrame, period: int = 14) -> pd.DataFrame:
        """
        Average Directional Index with +DI / -DI.

        Fixes over the previous implementation:
        * -DM is ``prev_low - low`` (the old code used ``low.diff(-1)``,
          which reads one bar into the FUTURE — lookahead bias);
        * +DM and -DM are masked against each other's RAW values, so
          masking one series no longer corrupts the other's comparison.
        """
        if any(col not in data.columns for col in ['high', 'low', 'close']):
            raise ValueError("数据必须包含high、low和close列")

        high = data['high']
        low = data['low']
        close = data['close']

        # Raw directional movements (past data only).
        up_move = high.diff()      # high_t - high_{t-1}
        down_move = -low.diff()    # low_{t-1} - low_t

        # Keep a movement only when it is positive AND dominates the
        # opposite raw movement; everything else becomes 0.
        plus_dm = up_move.where((up_move > down_move) & (up_move > 0), 0)
        minus_dm = down_move.where((down_move > up_move) & (down_move > 0), 0)

        # True range and its rolling mean (ATR).
        tr = pd.DataFrame({
            'hl': high - low,
            'hc': (high - close.shift(1)).abs(),
            'lc': (low - close.shift(1)).abs(),
        }).max(axis=1)
        atr = tr.rolling(window=period).mean()

        plus_di = 100 * (plus_dm.rolling(window=period).mean() / atr)
        minus_di = 100 * (minus_dm.rolling(window=period).mean() / atr)

        # Directional index, then its rolling mean (ADX).
        dx = 100 * (abs(plus_di - minus_di) / (plus_di + minus_di)).fillna(0)
        adx = dx.rolling(window=period).mean()

        result = pd.DataFrame(index=data.index)
        result['plus_di'] = plus_di
        result['minus_di'] = minus_di
        result['adx'] = adx

        return result

    # ---- Volume indicators -----------------------------------------------

    def _calculate_obv(self, data: pd.DataFrame) -> pd.Series:
        """On-Balance Volume: cumulative signed volume by close direction."""
        if any(col not in data.columns for col in ['close', 'volume']):
            raise ValueError("数据必须包含close和volume列")

        close = data['close']
        volume = data['volume']

        # +1 / -1 / 0 by close-to-close change; first diff is NaN, so the
        # first bar is treated as neutral (guarded for empty input).
        direction = np.sign(close.diff())
        if len(direction) > 0:
            direction.iloc[0] = 0

        obv = (direction * volume).cumsum()

        return pd.Series(obv, name='obv')

    def _calculate_vwap(self, data: pd.DataFrame, period: Optional[int] = None) -> pd.Series:
        """
        Volume-Weighted Average Price.

        ``period=None`` computes the running (whole-history) VWAP; otherwise
        a rolling-window VWAP over ``period`` bars.
        """
        if any(col not in data.columns for col in ['high', 'low', 'close', 'volume']):
            raise ValueError("数据必须包含high、low、close和volume列")

        # Typical price weighted by volume.
        typical_price = (data['high'] + data['low'] + data['close']) / 3
        price_volume = typical_price * data['volume']

        if period is None:
            return price_volume.cumsum() / data['volume'].cumsum()
        return (price_volume.rolling(window=period).sum() /
               data['volume'].rolling(window=period).sum())

    # ---- Calendar features -----------------------------------------------

    def _extract_time_features(self, data: pd.DataFrame) -> pd.DataFrame:
        """Calendar features from a DatetimeIndex (raises otherwise)."""
        if not isinstance(data.index, pd.DatetimeIndex):
            raise ValueError("数据索引必须是DatetimeIndex类型")

        result = pd.DataFrame(index=data.index)

        # Plain calendar fields
        result['day_of_week'] = data.index.dayofweek
        result['day_of_month'] = data.index.day
        result['week_of_year'] = data.index.isocalendar().week
        result['month'] = data.index.month
        result['quarter'] = data.index.quarter
        result['year'] = data.index.year

        # Boundary flags (0/1)
        result['is_month_start'] = data.index.is_month_start.astype(int)
        result['is_month_end'] = data.index.is_month_end.astype(int)
        result['is_quarter_start'] = data.index.is_quarter_start.astype(int)
        result['is_quarter_end'] = data.index.is_quarter_end.astype(int)
        result['is_year_start'] = data.index.is_year_start.astype(int)
        result['is_year_end'] = data.index.is_year_end.astype(int)

        # Cyclic encodings so Monday/Sunday and Jan/Dec are close in
        # feature space.
        result['day_of_week_sin'] = np.sin(2 * np.pi * result['day_of_week'] / 7)
        result['day_of_week_cos'] = np.cos(2 * np.pi * result['day_of_week'] / 7)
        result['month_sin'] = np.sin(2 * np.pi * result['month'] / 12)
        result['month_cos'] = np.cos(2 * np.pi * result['month'] / 12)

        return result

    # ---- Lagged features -------------------------------------------------

    def _create_lagged_features(self, data: pd.DataFrame, columns: Optional[List[str]] = None,
                              lags: Optional[List[int]] = None, returns: bool = False) -> pd.DataFrame:
        """
        Lagged copies of selected columns (optionally of their returns).

        ``columns`` defaults to ['close'] (or the first column); ``lags``
        defaults to (1, 2, 3, 5, 10).
        """
        if columns is None:
            columns = ['close'] if 'close' in data.columns else [data.columns[0]]
        if lags is None:
            lags = (1, 2, 3, 5, 10)

        result = pd.DataFrame(index=data.index)

        for col in columns:
            if col not in data.columns:
                logging.warning(f"列 {col} 不存在，跳过")
                continue

            if returns:
                series = data[col].pct_change()
                prefix = f'{col}_return_lag'
            else:
                series = data[col]
                prefix = f'{col}_lag'

            for lag in lags:
                result[f'{prefix}_{lag}'] = series.shift(lag)

        return result


class QMLPreprocessor:
    """Data preprocessor: missing-value handling, scaling, train/test
    splitting and feature selection for the ML pipeline."""

    def __init__(self):
        """Stateless; kept for API symmetry with the other QML components."""
        pass

    def prepare_data(self, features: pd.DataFrame, target: pd.Series = None,
                    test_size: float = 0.2, random_state: int = 42,
                    scaling: str = 'standard', fill_method: str = 'ffill',
                    time_series_split: bool = True, n_splits: int = 5) -> Dict[str, Any]:
        """
        Prepare data for machine learning.

        Args:
            features: feature DataFrame.
            target: optional target series, aligned to ``features`` by index.
            test_size: test-set fraction (random split only).
            random_state: RNG seed for the random split.
            scaling: 'standard' or 'minmax'.
            fill_method: missing-value strategy, see ``_handle_missing_values``.
            time_series_split: use ``TimeSeriesSplit`` instead of a random split.
            n_splits: number of folds for the time-series splitter.

        Returns:
            Dict with the cleaned/scaled data, the split sets (when ``target``
            is given), the CV splitter and the fitted scaler.

        Raises:
            ValueError: unknown ``scaling`` or ``fill_method``.
        """
        result = {}

        # Handle missing values first so the scaler never sees NaNs.
        features_cleaned = self._handle_missing_values(features, method=fill_method)
        result['features_cleaned'] = features_cleaned

        if scaling == 'standard':
            scaler = StandardScaler()
        elif scaling == 'minmax':
            scaler = MinMaxScaler()
        else:
            raise ValueError(f"未知的缩放方法: {scaling}")

        if target is not None:
            # Re-align the target with the (possibly row-dropped) features.
            target = target.loc[features_cleaned.index]
            result['target'] = target

            if time_series_split:
                tscv = TimeSeriesSplit(n_splits=n_splits)
                result['cv'] = tscv

                # Use the last (largest-train) fold as the hold-out split.
                train_indices, test_indices = list(tscv.split(features_cleaned))[-1]

                X_train = features_cleaned.iloc[train_indices]
                X_test = features_cleaned.iloc[test_indices]
                y_train = target.iloc[train_indices]
                y_test = target.iloc[test_indices]
            else:
                X_train, X_test, y_train, y_test = train_test_split(
                    features_cleaned, target, test_size=test_size, random_state=random_state
                )

            # Fit the scaler on the training set only, to avoid leakage.
            X_train_scaled = pd.DataFrame(
                scaler.fit_transform(X_train),
                index=X_train.index,
                columns=X_train.columns
            )
            X_test_scaled = pd.DataFrame(
                scaler.transform(X_test),
                index=X_test.index,
                columns=X_test.columns
            )

            result['X_train'] = X_train
            result['X_test'] = X_test
            result['y_train'] = y_train
            result['y_test'] = y_test
            result['X_train_scaled'] = X_train_scaled
            result['X_test_scaled'] = X_test_scaled
        else:
            # No target: just scale the whole cleaned feature set.
            features_scaled = pd.DataFrame(
                scaler.fit_transform(features_cleaned),
                index=features_cleaned.index,
                columns=features_cleaned.columns
            )
            result['features_scaled'] = features_scaled

        result['scaler'] = scaler

        return result

    def _handle_missing_values(self, data: pd.DataFrame, method: str = 'ffill') -> pd.DataFrame:
        """
        Fill or drop missing values.

        Methods: 'ffill' (forward then backward fill), 'bfill' (backward then
        forward fill), 'mean', 'median', 'drop'. Any NaNs that survive the
        chosen method are replaced with 0.

        Raises:
            ValueError: unknown ``method``.
        """
        if data.isnull().sum().sum() == 0:
            return data.copy()

        logging.info(f"数据包含 {data.isnull().sum().sum()} 个缺失值")

        if method == 'ffill':
            # Forward fill, then backward fill to cover leading NaNs.
            data_filled = data.ffill().bfill()
        elif method == 'bfill':
            # Backward fill, then forward fill to cover trailing NaNs.
            data_filled = data.bfill().ffill()
        elif method == 'mean':
            data_filled = data.fillna(data.mean())
        elif method == 'median':
            data_filled = data.fillna(data.median())
        elif method == 'drop':
            data_filled = data.dropna()
        else:
            raise ValueError(f"未知的填充方法: {method}")

        # Last-resort fill for anything still missing (e.g. all-NaN columns).
        if data_filled.isnull().sum().sum() > 0:
            logging.warning(f"填充后仍有 {data_filled.isnull().sum().sum()} 个缺失值，将使用0填充")
            data_filled = data_filled.fillna(0)

        return data_filled

    def select_features(self, X: pd.DataFrame, y: pd.Series, method: str = 'k_best',
                       n_features: int = 10, **kwargs) -> Tuple[pd.DataFrame, Any]:
        """
        Feature selection.

        Args:
            X: feature DataFrame.
            y: target series; <= 10 distinct values is treated as classification.
            method: 'k_best', 'mutual_info', 'pca' or 'rf'.
            n_features: number of features/components to keep.

        Returns:
            Tuple[pd.DataFrame, Any]: selected features and the fitted selector.

        Raises:
            ValueError: unknown ``method``.
        """
        # Heuristic: few unique target values -> classification scoring.
        is_classification = len(np.unique(y)) <= 10

        if method == 'k_best':
            score_func = f_classif if is_classification else f_regression
            selector = SelectKBest(score_func, k=n_features)
        elif method == 'mutual_info':
            score_func = mutual_info_classif if is_classification else mutual_info_regression
            selector = SelectKBest(score_func, k=n_features)
        elif method == 'pca':
            selector = PCA(n_components=n_features)
        elif method == 'rf':
            if is_classification:
                selector = RandomForestClassifier(n_estimators=100, random_state=42)
            else:
                selector = RandomForestRegressor(n_estimators=100, random_state=42)
        else:
            raise ValueError(f"未知的特征选择方法: {method}")

        if method == 'rf':
            # Random forests are not transformers (no fit_transform in modern
            # sklearn): fit, then keep the columns with the highest importances.
            selector.fit(X, y)
            indices = np.argsort(selector.feature_importances_)[::-1][:n_features]
            columns = [X.columns[i] for i in indices]
            X_selected = X.iloc[:, indices].values
        elif method == 'pca':
            X_selected = selector.fit_transform(X, y)
            # Principal components have no original column names.
            columns = [f'PC{i+1}' for i in range(n_features)]
        else:
            X_selected = selector.fit_transform(X, y)
            columns = X.columns[selector.get_support()]

        X_selected_df = pd.DataFrame(X_selected, index=X.index, columns=columns)

        return X_selected_df, selector


class QMLModel:
    """Thin wrapper around scikit-learn estimators providing a uniform
    train / predict / evaluate / persist interface."""

    def __init__(self, model_type: str = 'classifier', model_name: str = 'random_forest', **model_params):
        """
        Initialize the model.

        Args:
            model_type: 'classifier' or 'regressor'.
            model_name: estimator key understood by :meth:`_create_model`.
            model_params: forwarded verbatim to the estimator constructor.
        """
        self.model_type = model_type
        self.model_name = model_name
        self.model = self._create_model(model_name, model_type, **model_params)
        # DataFrame of (feature, importance) pairs; filled by fit() when the
        # estimator exposes feature_importances_.
        self.feature_importance = None
        self.is_fitted = False

    def _create_model(self, model_name: str, model_type: str, **model_params) -> 'BaseEstimator':
        """
        Instantiate the scikit-learn estimator for (model_type, model_name).

        Raises:
            ValueError: unknown model type or model name.
        """
        if model_type == 'classifier':
            classifiers = {
                'random_forest': RandomForestClassifier,
                'gradient_boosting': GradientBoostingClassifier,
                'svm': SVC,
                'logistic_regression': LogisticRegression,
                'mlp': MLPClassifier,
            }
            if model_name not in classifiers:
                raise ValueError(f"未知的分类器: {model_name}")
            return classifiers[model_name](**model_params)
        elif model_type == 'regressor':
            regressors = {
                'random_forest': RandomForestRegressor,
                'gradient_boosting': GradientBoostingRegressor,
                'svm': SVR,
                'linear_regression': LinearRegression,
                'ridge': Ridge,
                'lasso': Lasso,
                'mlp': MLPRegressor,
            }
            if model_name not in regressors:
                raise ValueError(f"未知的回归器: {model_name}")
            return regressors[model_name](**model_params)
        else:
            raise ValueError(f"未知的模型类型: {model_type}")

    def fit(self, X: pd.DataFrame, y: pd.Series, **kwargs) -> 'QMLModel':
        """
        Train the model.

        Args:
            X: feature DataFrame.
            y: target series.
            kwargs: forwarded to the estimator's ``fit``.

        Returns:
            self, to allow chaining.
        """
        self.model.fit(X, y, **kwargs)
        self.is_fitted = True

        # Capture feature importances when the estimator provides them.
        if hasattr(self.model, 'feature_importances_'):
            self.feature_importance = pd.DataFrame({
                'feature': X.columns,
                'importance': self.model.feature_importances_
            }).sort_values('importance', ascending=False)

        return self

    def predict(self, X: pd.DataFrame) -> np.ndarray:
        """
        Predict targets for ``X``.

        Raises:
            ValueError: the model has not been fitted yet.
        """
        if not self.is_fitted:
            raise ValueError("模型尚未训练")

        return self.model.predict(X)

    def predict_proba(self, X: pd.DataFrame) -> np.ndarray:
        """
        Class-probability prediction (classifiers only).

        Raises:
            ValueError: model not fitted, not a classifier, or the underlying
                estimator does not implement ``predict_proba``.
        """
        if not self.is_fitted:
            raise ValueError("模型尚未训练")

        if self.model_type != 'classifier':
            raise ValueError("predict_proba仅适用于分类模型")

        if not hasattr(self.model, 'predict_proba'):
            raise ValueError(f"模型 {self.model_name} 不支持概率预测")
        return self.model.predict_proba(X)

    def evaluate(self, X: pd.DataFrame, y: pd.Series) -> Dict[str, float]:
        """
        Evaluate model performance.

        Classification: accuracy plus weighted precision/recall/F1.
        Regression: MSE, RMSE and R^2.

        Raises:
            ValueError: the model has not been fitted yet.
        """
        if not self.is_fitted:
            raise ValueError("模型尚未训练")

        y_pred = self.predict(X)
        metrics = {}

        if self.model_type == 'classifier':
            metrics['accuracy'] = accuracy_score(y, y_pred)
            try:
                # Some multi-class setups cannot compute these; keep accuracy.
                metrics['precision'] = precision_score(y, y_pred, average='weighted')
                metrics['recall'] = recall_score(y, y_pred, average='weighted')
                metrics['f1'] = f1_score(y, y_pred, average='weighted')
            except Exception as e:
                logging.warning(f"计算某些分类指标时出错: {str(e)}")
        else:
            metrics['mse'] = mean_squared_error(y, y_pred)
            metrics['rmse'] = np.sqrt(metrics['mse'])
            metrics['r2'] = r2_score(y, y_pred)

        return metrics

    def cross_validate(self, X: pd.DataFrame, y: pd.Series, cv=5, scoring=None) -> Dict[str, Any]:
        """
        Cross-validate the estimator (does not mark the model as fitted).

        Args:
            cv: fold count or CV splitter.
            scoring: sklearn scoring name; defaults to accuracy (classifier)
                or negative MSE (regressor).

        Returns:
            Dict with the raw fold scores (ndarray) plus their mean and std.
        """
        if scoring is None:
            scoring = 'accuracy' if self.model_type == 'classifier' else 'neg_mean_squared_error'

        scores = cross_val_score(self.model, X, y, cv=cv, scoring=scoring)

        return {
            'scores': scores,
            'mean_score': scores.mean(),
            'std_score': scores.std()
        }

    def save(self, path: str) -> None:
        """Pickle the model and its metadata to ``path`` (creating directories)."""
        os.makedirs(os.path.dirname(os.path.abspath(path)), exist_ok=True)

        with open(path, 'wb') as f:
            pickle.dump({
                'model': self.model,
                'model_type': self.model_type,
                'model_name': self.model_name,
                'feature_importance': self.feature_importance,
                'is_fitted': self.is_fitted
            }, f)

        logging.info(f"模型已保存到: {path}")

    @classmethod
    def load(cls, path: str) -> 'QMLModel':
        """
        Load a model previously written by :meth:`save`.

        SECURITY: ``pickle.load`` can execute arbitrary code embedded in the
        file — only load model files from trusted sources.
        """
        with open(path, 'rb') as f:
            data = pickle.load(f)

        # __new__ skips __init__, so no throwaway default estimator is
        # constructed only to be replaced by the unpickled one.
        model = cls.__new__(cls)
        model.model = data['model']
        model.model_type = data['model_type']
        model.model_name = data['model_name']
        model.feature_importance = data['feature_importance']
        model.is_fitted = data['is_fitted']

        return model

    def plot_feature_importance(self, top_n: int = 10, figsize: Tuple[int, int] = (10, 6)) -> 'plt.Figure':
        """
        Plot the top-N feature importances as a horizontal bar chart.

        Raises:
            ValueError: the model exposed no feature importances.
        """
        if self.feature_importance is None:
            raise ValueError("模型没有特征重要性信息")

        top_features = self.feature_importance.head(top_n)

        fig, ax = plt.subplots(figsize=figsize)
        ax.barh(top_features['feature'], top_features['importance'])
        ax.set_xlabel('重要性')
        ax.set_ylabel('特征')
        ax.set_title(f'{self.model_name} 特征重要性（前{top_n}个）')
        plt.tight_layout()

        return fig

    def plot_learning_curve(self, X: pd.DataFrame, y: pd.Series,
                           train_sizes=np.linspace(0.1, 1.0, 5), cv=5,
                           scoring=None, figsize: Tuple[int, int] = (10, 6)) -> 'plt.Figure':
        """
        Plot train/validation learning curves.

        Args:
            X: feature DataFrame.
            y: target series.
            train_sizes: relative training-set sizes to evaluate.
            cv: fold count or splitter.
            scoring: sklearn scoring name (defaults as in :meth:`cross_validate`).
            figsize: figure size.
        """
        from sklearn.model_selection import learning_curve

        if scoring is None:
            scoring = 'accuracy' if self.model_type == 'classifier' else 'neg_mean_squared_error'

        train_sizes, train_scores, test_scores = learning_curve(
            self.model, X, y, train_sizes=train_sizes, cv=cv, scoring=scoring, n_jobs=-1
        )

        train_mean = np.mean(train_scores, axis=1)
        train_std = np.std(train_scores, axis=1)
        test_mean = np.mean(test_scores, axis=1)
        test_std = np.std(test_scores, axis=1)

        fig, ax = plt.subplots(figsize=figsize)
        ax.plot(train_sizes, train_mean, 'o-', color='r', label='训练集')
        ax.fill_between(train_sizes, train_mean - train_std, train_mean + train_std, alpha=0.1, color='r')
        ax.plot(train_sizes, test_mean, 'o-', color='g', label='测试集')
        ax.fill_between(train_sizes, test_mean - test_std, test_mean + test_std, alpha=0.1, color='g')
        ax.set_xlabel('训练样本数')
        ax.set_ylabel('得分')
        ax.set_title('学习曲线')
        ax.legend(loc='best')
        plt.grid(True)
        plt.tight_layout()

        return fig


class QTorchMLStrategy:
    """
    Base class for machine-learning based quantitative trading strategies.

    Uses a machine-learning model to generate trading signals and can serve
    as the foundation of a QTorch strategy.
    """

    def __init__(self, model_path: str = None, prediction_threshold: float = 0.5,
               features_config: Dict[str, Any] = None, target_type: str = 'direction'):
        """
        Initialize the strategy.

        Args:
            model_path: path of a pre-trained model to load (optional).
            prediction_threshold: threshold used to turn regression outputs
                into buy/sell signals.
            features_config: feature-extraction configuration; a default
                technical-indicator set is used when None.
            target_type: target definition, one of 'direction' (price
                direction), 'return' (next-period return) or 'movement'
                (classified move beyond +/-0.5%).
        """
        self.model = None
        if model_path:
            self.model = QMLModel.load(model_path)

        self.prediction_threshold = prediction_threshold
        self.target_type = target_type

        # Default feature configuration
        if features_config is None:
            self.features_config = {
                'sma': {'periods': [5, 10, 20, 50]},
                'rsi': {'period': 14},
                'macd': {},
                'volatility': {'period': 20},
                'lagged_features': {'columns': ['close'], 'lags': [1, 2, 3, 5]}
            }
        else:
            self.features_config = features_config

        # Feature extractor
        self.feature_extractor = QMLFeatureExtractor()

        # Scaler fitted in train(); re-applied at inference so prediction
        # features match the scaled training distribution (previously the
        # model was trained on scaled features but fed raw features at
        # prediction time). Stays None for models loaded from disk, in which
        # case inference behaves exactly as before.
        self._scaler = None

        # Feature cache, keyed on (length, last index) of the input data so
        # that a same-length but different data window invalidates the cache
        # (length alone is not a safe cache key for a rolling window).
        self._features_cache = None
        self._cache_key = None
        self._last_data_len = 0

    def generate_features(self, data: pd.DataFrame) -> pd.DataFrame:
        """
        Generate model features from market data.

        Args:
            data: market data.

        Returns:
            pd.DataFrame: feature matrix.
        """
        return self.feature_extractor.extract_features(data, self.features_config)

    def generate_target(self, data: pd.DataFrame) -> pd.Series:
        """
        Generate the target variable according to ``self.target_type``.

        Args:
            data: market data with a 'close' column.

        Returns:
            pd.Series: target aligned on ``data.index``; the last element is
            NaN (or 0 for 'movement') because it depends on the next, unseen bar.

        Raises:
            ValueError: if ``self.target_type`` is unknown.
        """
        if self.target_type == 'direction':
            # Price direction (up=1, down=-1, flat=0)
            return np.sign(data['close'].shift(-1) - data['close'])

        elif self.target_type == 'return':
            # Next-period simple return
            return data['close'].pct_change(1).shift(-1)

        elif self.target_type == 'movement':
            # Classified move: rise > 0.5% -> 1, drop > 0.5% -> -1, else 0
            returns = data['close'].pct_change(1).shift(-1)
            return pd.Series(
                np.where(returns > 0.005, 1, np.where(returns < -0.005, -1, 0)),
                index=data.index
            )

        else:
            raise ValueError(f"未知的目标类型: {self.target_type}")

    def _prepare_features(self, data: pd.DataFrame, use_cache: bool = False) -> pd.DataFrame:
        """
        Build inference-ready features: extract (optionally cached), fill
        missing values, then apply the training-time scaler when available.
        """
        cache_key = (len(data), data.index[-1] if len(data) > 0 else None)

        if use_cache and self._features_cache is not None and cache_key == self._cache_key:
            features = self._features_cache
        else:
            features = self.generate_features(data)
            if use_cache:
                self._features_cache = features
                self._cache_key = cache_key
                self._last_data_len = len(data)

        # Fill missing values: forward, then backward, then zero
        features = features.ffill().bfill().fillna(0)

        # Apply the scaler fitted during training, if any.
        # NOTE(review): assumes an sklearn-style scaler and that the feature
        # columns match the training layout — confirm against QMLPreprocessor.
        if self._scaler is not None:
            features = pd.DataFrame(
                self._scaler.transform(features),
                index=features.index,
                columns=features.columns
            )

        return features

    def train(self, data: pd.DataFrame, model_type: str = 'classifier',
             model_name: str = 'random_forest', test_size: float = 0.2,
             save_path: str = None, **model_params) -> Dict[str, Any]:
        """
        Train a model on the given market data.

        Args:
            data: market data.
            model_type: model type ('classifier' or 'regressor').
            model_name: model name understood by QMLModel.
            test_size: fraction of the data held out for testing.
            save_path: optional path to persist the trained model.
            model_params: extra parameters forwarded to QMLModel.

        Returns:
            Dict[str, Any]: trained model, train/test metrics, preprocessor,
            fitted scaler and feature names.
        """
        # Generate features and target
        features = self.generate_features(data)
        target = self.generate_target(data)

        # Preprocess: time-series-aware split plus scaling
        preprocessor = QMLPreprocessor()
        prep_data = preprocessor.prepare_data(
            features, target, test_size=test_size, time_series_split=True
        )

        X_train = prep_data['X_train_scaled']
        y_train = prep_data['y_train']
        X_test = prep_data['X_test_scaled']
        y_test = prep_data['y_test']

        # Create and fit the model
        self.model = QMLModel(model_type=model_type, model_name=model_name, **model_params)
        self.model.fit(X_train, y_train)

        # Keep the fitted scaler so inference applies the same transformation
        # the model was trained on
        self._scaler = prep_data['scaler']

        # Evaluate on both splits
        train_metrics = self.model.evaluate(X_train, y_train)
        test_metrics = self.model.evaluate(X_test, y_test)

        # Persist the model if requested
        if save_path:
            self.model.save(save_path)

        return {
            'model': self.model,
            'train_metrics': train_metrics,
            'test_metrics': test_metrics,
            'preprocessor': preprocessor,
            'scaler': prep_data['scaler'],
            'feature_names': features.columns.tolist()
        }

    def predict(self, data: pd.DataFrame) -> np.ndarray:
        """
        Generate raw model predictions for the given market data.

        Args:
            data: market data.

        Returns:
            np.ndarray: model predictions.

        Raises:
            ValueError: if no model has been trained or loaded.
        """
        if self.model is None:
            raise ValueError("模型尚未训练或加载")

        return self.model.predict(self._prepare_features(data))

    def generate_signals(self, data: pd.DataFrame) -> np.ndarray:
        """
        Generate trading signals from the model's predictions.

        Args:
            data: market data.

        Returns:
            np.ndarray: signals (1=buy, -1=sell, 0=hold).

        Raises:
            ValueError: if no model has been trained or loaded.
        """
        if self.model is None:
            raise ValueError("模型尚未训练或加载")

        # Feature preparation with caching (signals may be re-requested on
        # the same data window repeatedly during a backtest)
        features = self._prepare_features(data, use_cache=True)

        predictions = self.model.predict(features)

        if self.model.model_type == 'classifier':
            # Binary classifiers may emit {0, 1}; remap to {-1, 1}
            if set(np.unique(predictions)) == {0, 1}:
                predictions = predictions * 2 - 1
            return predictions

        # Regression outputs are thresholded into discrete signals
        signals = np.zeros_like(predictions)
        signals[predictions > self.prediction_threshold] = 1
        signals[predictions < -self.prediction_threshold] = -1
        return signals


class QTorchPyTorchModel:
    """PyTorch neural-network model wrapper.

    Wraps an ``nn.Module`` (either user-supplied or an auto-built MLP)
    together with its optimizer, loss function and device placement, and
    exposes an sklearn-like fit/predict interface with optional mixed
    precision.
    """
    
    def __init__(self, model=None, input_dim=None, hidden_dims=None, output_dim=None,
                dropout=0.2, learning_rate=0.001, device=None):
        """
        Initialize the PyTorch model.

        Args:
            model: pre-built model (optional; takes precedence over dims).
            input_dim: input dimension (when creating a new model).
            hidden_dims: hidden-layer sizes (when creating a new model;
                defaults to [128, 64]).
            output_dim: output dimension (when creating a new model).
            dropout: dropout ratio (when creating a new model).
            learning_rate: learning rate for the Adam optimizer.
            device: compute device; when None the best available device is
                selected automatically.

        Raises:
            ImportError: if PyTorch is not installed.
        """
        if not TORCH_AVAILABLE:
            raise ImportError("PyTorch未安装，无法使用此功能")
        
        # Project-local device-management helpers
        from qtorch.utils.device_utils import get_device, get_device_manager
            
        # Determine the compute device
        if device is None:
            # Let the device manager pick the optimal device
            self.device = get_device()
        else:
            self.device = device
            
        # Create or adopt the model
        if model is not None:
            self.model = model
        elif input_dim is not None and output_dim is not None:
            if hidden_dims is None:
                hidden_dims = [128, 64]
                
            self.model = self._create_mlp(input_dim, hidden_dims, output_dim, dropout)
        else:
            # Neither a model nor dimensions given: stays uninitialized until
            # the caller provides a model
            self.model = None
            
        self.learning_rate = learning_rate
        self.input_dim = input_dim
        self.output_dim = output_dim
        
        # Move the model to the selected device
        if self.model is not None:
            self.model = self.model.to(self.device)
            
        # Initialize optimizer and loss function
        if self.model is not None:
            self.optimizer = optim.Adam(self.model.parameters(), lr=learning_rate)
            
            # Infer the task type from the output dimension
            if output_dim == 1:
                # Regression (or binary classification treated as regression)
                self.criterion = nn.MSELoss()
            else:
                # Multi-class classification
                self.criterion = nn.CrossEntropyLoss()
                
        # Log which device the model was placed on
        device_name = get_device_manager().get_device_name()
        logging.info(f"PyTorch模型初始化在设备: {device_name}")
                
    def _create_mlp(self, input_dim, hidden_dims, output_dim, dropout=0.2):
        """Build a plain MLP: (Linear -> ReLU -> Dropout)* -> Linear."""
        layers = []
        
        # Input layer -> first hidden layer
        layers.append(nn.Linear(input_dim, hidden_dims[0]))
        layers.append(nn.ReLU())
        layers.append(nn.Dropout(dropout))
        
        # Remaining hidden layers
        for i in range(len(hidden_dims) - 1):
            layers.append(nn.Linear(hidden_dims[i], hidden_dims[i+1]))
            layers.append(nn.ReLU())
            layers.append(nn.Dropout(dropout))
            
        # Output layer
        layers.append(nn.Linear(hidden_dims[-1], output_dim))
        
        # No extra activation on a single output (regression);
        # for multi-class, softmax is applied at prediction time and
        # CrossEntropyLoss works on logits during training
        
        return nn.Sequential(*layers)
        
    def fit(self, X, y, batch_size=32, epochs=100, validation_split=0.2,
           early_stopping=True, patience=10, verbose=1, use_mixed_precision=True):
        """
        Train the model.

        Args:
            X: feature data (DataFrame or ndarray).
            y: target variable (Series or ndarray).
            batch_size: mini-batch size.
            epochs: number of training epochs.
            validation_split: fraction of samples held out for validation.
            early_stopping: whether to stop when validation loss stops improving.
            patience: epochs without improvement tolerated before stopping.
            verbose: progress-printing interval (0 disables printing).
            use_mixed_precision: enable mixed-precision training (faster on GPU).

        Returns:
            Dict: training history with 'train_loss' and 'val_loss' lists.

        Raises:
            ImportError: if PyTorch is not installed.
            ValueError: if the model was never initialized.
        """
        if not TORCH_AVAILABLE:
            raise ImportError("PyTorch未安装，无法使用此功能")
            
        if self.model is None:
            raise ValueError("模型未初始化")
            
        # Project-local device helpers
        from qtorch.utils.device_utils import tensor_to_device, get_autocast_context
        
        # Convert features to a tensor
        X_tensor = torch.FloatTensor(X.values if isinstance(X, pd.DataFrame) else X)
        
        # Shape/type y according to the output dimension
        if self.output_dim == 1:
            # Regression (or binary): column vector of floats for MSELoss
            y_tensor = torch.FloatTensor(y.values.reshape(-1, 1) if isinstance(y, pd.Series) else y.reshape(-1, 1))
        else:
            # Multi-class: class indices as LongTensor for CrossEntropyLoss
            y_tensor = torch.LongTensor(y.values if isinstance(y, pd.Series) else y)
            
        # Split into training and validation indices
        dataset_size = len(X_tensor)
        indices = list(range(dataset_size))
        split = int(np.floor(validation_split * dataset_size))
        
        if validation_split < 1e-6:
            # No validation set
            train_indices = indices
            val_indices = []
        else:
            # Random split.
            # NOTE(review): shuffling mixes time ordering; for financial
            # time series a chronological split may be preferable — confirm.
            np.random.shuffle(indices)
            train_indices, val_indices = indices[split:], indices[:split]
            
        # Build data loaders
        train_dataset = torch.utils.data.TensorDataset(
            X_tensor[train_indices], y_tensor[train_indices]
        )
        train_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=batch_size, shuffle=True
        )
        
        if val_indices:
            val_dataset = torch.utils.data.TensorDataset(
                X_tensor[val_indices], y_tensor[val_indices]
            )
            val_loader = torch.utils.data.DataLoader(
                val_dataset, batch_size=batch_size, shuffle=False
            )
        
        # Per-epoch loss history
        history = {
            'train_loss': [],
            'val_loss': []
        }
        
        # Early-stopping state
        best_val_loss = float('inf')
        early_stop_counter = 0
        
        # Autocast context for mixed precision (project helper)
        autocast = get_autocast_context(enabled=use_mixed_precision)
        
        # Gradient scaler is only needed for CUDA mixed precision.
        # NOTE(review): torch.cuda.amp.GradScaler is deprecated in newer
        # PyTorch in favor of torch.amp.GradScaler('cuda') — confirm the
        # minimum supported torch version.
        scaler = torch.cuda.amp.GradScaler() if use_mixed_precision and self.device.type == 'cuda' else None
        
        # Training loop
        for epoch in range(epochs):
            # Training mode (enables dropout etc.)
            self.model.train()
            train_loss = 0
            
            for batch_X, batch_y in train_loader:
                # Move the batch to the compute device
                batch_X, batch_y = tensor_to_device(batch_X, self.device), tensor_to_device(batch_y, self.device)
                
                # Mixed-precision forward pass
                with autocast:
                    # Forward pass
                    self.optimizer.zero_grad()
                    outputs = self.model(batch_X)
                    
                    # Compute the loss
                    if self.output_dim == 1:
                        loss = self.criterion(outputs, batch_y)
                    else:
                        # CrossEntropyLoss expects 1-D class indices
                        loss = self.criterion(outputs, batch_y.squeeze())
                
                # Backward pass through the gradient scaler when mixed
                # precision is active on CUDA
                if scaler is not None:
                    scaler.scale(loss).backward()
                    scaler.step(self.optimizer)
                    scaler.update()
                else:
                    # Plain backward pass
                    loss.backward()
                    self.optimizer.step()
                
                train_loss += loss.item()
                
            train_loss /= len(train_loader)
            history['train_loss'].append(train_loss)
            
            # Validation phase
            if val_indices:
                val_loss = self._validate(val_loader, use_mixed_precision)
                history['val_loss'].append(val_loss)
                
                # Progress report every `verbose` epochs
                if verbose > 0 and (epoch + 1) % verbose == 0:
                    print(f'Epoch {epoch+1}/{epochs} - train_loss: {train_loss:.4f} - val_loss: {val_loss:.4f}')
                
                # Early stopping on validation loss
                if early_stopping:
                    if val_loss < best_val_loss:
                        best_val_loss = val_loss
                        early_stop_counter = 0
                    else:
                        early_stop_counter += 1
                        
                    # NOTE(review): best weights are not restored on stop;
                    # the model keeps its last-epoch parameters
                    if early_stop_counter >= patience:
                        if verbose > 0:
                            print(f'早停触发，停止在第 {epoch+1} 轮')
                        break
            else:
                # No validation set: report training loss only
                if verbose > 0 and (epoch + 1) % verbose == 0:
                    print(f'Epoch {epoch+1}/{epochs} - train_loss: {train_loss:.4f}')
                    
        return history
        
    def _validate(self, val_loader, use_mixed_precision=True):
        """Compute mean validation loss over the loader (no gradients)."""
        from qtorch.utils.device_utils import tensor_to_device, get_autocast_context
        
        self.model.eval()
        val_loss = 0
        
        # Autocast context for mixed precision
        autocast = get_autocast_context(enabled=use_mixed_precision)
        
        with torch.no_grad():
            for batch_X, batch_y in val_loader:
                # Move the batch to the compute device
                batch_X = tensor_to_device(batch_X, self.device)
                batch_y = tensor_to_device(batch_y, self.device)
                
                # Mixed-precision forward pass
                with autocast:
                    # Forward pass
                    outputs = self.model(batch_X)
                    
                    # Compute the loss (same convention as fit())
                    if self.output_dim == 1:
                        loss = self.criterion(outputs, batch_y)
                    else:
                        loss = self.criterion(outputs, batch_y.squeeze())
                    
                val_loss += loss.item()
                
        return val_loss / len(val_loader)
        
    def predict(self, X, use_mixed_precision=True):
        """
        Run inference.

        Args:
            X: feature data (DataFrame or ndarray).
            use_mixed_precision: whether to use mixed precision.

        Returns:
            np.ndarray: flat predictions for single-output models, or argmax
            class indices for multi-class models.

        Raises:
            ImportError: if PyTorch is not installed.
            ValueError: if the model was never initialized.
        """
        if not TORCH_AVAILABLE:
            raise ImportError("PyTorch未安装，无法使用此功能")
            
        if self.model is None:
            raise ValueError("模型未初始化")
            
        # Project-local device helpers
        from qtorch.utils.device_utils import tensor_to_device, get_autocast_context
            
        # Convert features to a tensor on the compute device
        X_tensor = torch.FloatTensor(X.values if isinstance(X, pd.DataFrame) else X)
        X_tensor = tensor_to_device(X_tensor, self.device)
        
        # Evaluation mode (disables dropout etc.)
        self.model.eval()
        
        # Autocast context for mixed precision
        autocast = get_autocast_context(enabled=use_mixed_precision)
        
        with torch.no_grad():
            with autocast:
                predictions = self.model(X_tensor)
            
        # Post-process the raw outputs
        if self.output_dim == 1:
            # Regression (or binary): flatten to a 1-D array
            return predictions.cpu().numpy().flatten()
        else:
            # Multi-class: softmax then argmax to class indices
            probs = torch.softmax(predictions, dim=1).cpu().numpy()
            return np.argmax(probs, axis=1)
            
    def predict_proba(self, X, use_mixed_precision=True):
        """
        Predict class probabilities (multi-class models only).

        Args:
            X: feature data (DataFrame or ndarray).
            use_mixed_precision: whether to use mixed precision.

        Returns:
            np.ndarray: per-class softmax probabilities, shape (n, output_dim).

        Raises:
            ImportError: if PyTorch is not installed.
            ValueError: if the model was never initialized or has <= 1 output.
        """
        if not TORCH_AVAILABLE:
            raise ImportError("PyTorch未安装，无法使用此功能")
            
        if self.model is None:
            raise ValueError("模型未初始化")
            
        if self.output_dim <= 1:
            raise ValueError("predict_proba只适用于多分类问题")
        
        # Project-local device helpers
        from qtorch.utils.device_utils import tensor_to_device, get_autocast_context
            
        # Convert features to a tensor on the compute device
        X_tensor = torch.FloatTensor(X.values if isinstance(X, pd.DataFrame) else X)
        X_tensor = tensor_to_device(X_tensor, self.device)
        
        # Evaluation mode
        self.model.eval()
        
        # Autocast context for mixed precision
        autocast = get_autocast_context(enabled=use_mixed_precision)
        
        with torch.no_grad():
            with autocast:
                predictions = self.model(X_tensor)
            probs = torch.softmax(predictions, dim=1).cpu().numpy()
            
        return probs
        
    def save(self, path):
        """
        Persist model weights, optimizer state and configuration to `path`.

        Args:
            path: destination file path (parent directories are created).

        Raises:
            ImportError: if PyTorch is not installed.
            ValueError: if the model was never initialized.
        """
        if not TORCH_AVAILABLE:
            raise ImportError("PyTorch未安装，无法使用此功能")
            
        if self.model is None:
            raise ValueError("模型未初始化")
            
        os.makedirs(os.path.dirname(os.path.abspath(path)), exist_ok=True)
        
        # Collect weights and the configuration needed to rebuild the model
        state = {
            'model_state_dict': self.model.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'input_dim': self.input_dim,
            'output_dim': self.output_dim,
            'learning_rate': self.learning_rate,
            'device_type': self.device.type,
        }
        
        # Move weights to CPU before saving so the checkpoint is portable
        # across machines/devices
        if self.device.type != 'cpu':
            cpu_state_dict = {k: v.cpu() for k, v in self.model.state_dict().items()}
            state['model_state_dict'] = cpu_state_dict
        
        torch.save(state, path)
        logging.info(f"模型已保存到: {path}")
        
    @classmethod
    def load(cls, path, device=None):
        """
        Load a model saved by :meth:`save`.

        Args:
            path: checkpoint path.
            device: target device; when None the best available device is
                auto-detected.

        Returns:
            QTorchPyTorchModel: the loaded model instance.

        Raises:
            ImportError: if PyTorch is not installed.
        """
        if not TORCH_AVAILABLE:
            raise ImportError("PyTorch未安装，无法使用此功能")
            
        # Project-local device helper
        from qtorch.utils.device_utils import get_device
        
        # Auto-detect the target device when not given
        if device is None:
            device = get_device()
        
        # Load to CPU first for cross-device compatibility
        state = torch.load(path, map_location=torch.device('cpu'))
        
        # Rebuild the model from the saved configuration (recreates the
        # default MLP architecture from input/output dims)
        instance = cls(
            input_dim=state['input_dim'],
            output_dim=state['output_dim'],
            learning_rate=state['learning_rate'],
            device=device
        )
        
        # Restore weights and optimizer state
        instance.model.load_state_dict(state['model_state_dict'])
        instance.optimizer.load_state_dict(state['optimizer_state_dict'])
        
        # Move the model to the target device
        instance.model = instance.model.to(device)
        
        # Log the device migration
        original_device = state.get('device_type', 'unknown')
        logging.info(f"模型从 {original_device} 设备加载到 {device.type} 设备")
        
        return instance


class QML:
    """Main class of the QTorch machine-learning integration module.

    Thin facade bundling feature extraction, preprocessing, model creation,
    one-step train/test and result visualization.
    """

    def __init__(self):
        """Initialize the shared feature extractor and preprocessor."""
        self.feature_extractor = QMLFeatureExtractor()
        self.preprocessor = QMLPreprocessor()

    def create_model(self, model_type='classifier', model_name='random_forest', **model_params):
        """Create a machine-learning model wrapper (QMLModel)."""
        return QMLModel(model_type=model_type, model_name=model_name, **model_params)

    def create_pytorch_model(self, input_dim, output_dim, hidden_dims=None, **model_params):
        """Create a PyTorch model wrapper.

        Raises:
            ImportError: if PyTorch is not installed.
        """
        if not TORCH_AVAILABLE:
            raise ImportError("PyTorch未安装，无法使用此功能")

        return QTorchPyTorchModel(input_dim=input_dim, output_dim=output_dim,
                                hidden_dims=hidden_dims, **model_params)

    def create_strategy(self, model_path=None, features_config=None,
                       prediction_threshold=0.5, target_type='direction'):
        """Create a machine-learning based trading strategy.

        Args:
            model_path: path of a pre-trained model (optional).
            features_config: feature-extraction configuration (optional).
            prediction_threshold: threshold for turning regression outputs
                into signals (forwarded; default matches QTorchMLStrategy).
            target_type: target definition (forwarded; default matches
                QTorchMLStrategy), e.g. 'direction', 'return', 'movement'.

        Returns:
            QTorchMLStrategy: the configured strategy.
        """
        # Defaults equal QTorchMLStrategy's own defaults, so existing
        # two-argument callers behave exactly as before.
        return QTorchMLStrategy(model_path=model_path,
                                prediction_threshold=prediction_threshold,
                                features_config=features_config,
                                target_type=target_type)

    def extract_features(self, data, features_config):
        """Extract features from market data using the shared extractor."""
        return self.feature_extractor.extract_features(data, features_config)

    def prepare_data(self, features, target=None, **kwargs):
        """Prepare (split/scale) data for machine learning."""
        return self.preprocessor.prepare_data(features, target, **kwargs)

    def train_test_model(self, X, y, model=None, model_type='classifier', model_name='random_forest',
                       test_size=0.2, **model_params):
        """One-step train-and-evaluate helper.

        Args:
            X: feature data.
            y: target variable.
            model: existing model to reuse; a new one is created when None.
            model_type: model type for a newly created model.
            model_name: model name for a newly created model.
            test_size: fraction of the data held out for testing.
            model_params: extra parameters for a newly created model.

        Returns:
            dict: model, train/test metrics, the split data and the scaler.
        """
        # Preprocess and split
        prep_data = self.preprocessor.prepare_data(X, y, test_size=test_size)

        X_train = prep_data['X_train_scaled']
        y_train = prep_data['y_train']
        X_test = prep_data['X_test_scaled']
        y_test = prep_data['y_test']

        # Create a model unless one was supplied
        if model is None:
            model = self.create_model(model_type=model_type, model_name=model_name, **model_params)

        # Fit
        model.fit(X_train, y_train)

        # Evaluate on both splits
        train_metrics = model.evaluate(X_train, y_train)
        test_metrics = model.evaluate(X_test, y_test)

        return {
            'model': model,
            'train_metrics': train_metrics,
            'test_metrics': test_metrics,
            'X_train': X_train,
            'y_train': y_train,
            'X_test': X_test,
            'y_test': y_test,
            'scaler': prep_data['scaler']
        }

    def visualize_results(self, model, X, y, feature_names=None):
        """Visualize model results in a 2x2 grid.

        Panels: feature importance (if available), predicted-vs-actual (or a
        confusion matrix for classifiers), a metrics table, and a 2-D PCA
        view of the data.

        Returns:
            plt.Figure: the assembled figure.
        """
        # matplotlib.pyplot is already imported at module level as `plt`;
        # seaborn is only needed here, so keep its import local
        import seaborn as sns

        fig, axes = plt.subplots(2, 2, figsize=(18, 12))

        # 1. Feature importance (when the model exposes it)
        if hasattr(model, 'feature_importance') and model.feature_importance is not None:
            top_n = min(10, len(model.feature_importance))
            top_features = model.feature_importance.head(top_n)

            sns.barplot(x='importance', y='feature', data=top_features, ax=axes[0, 0])
            axes[0, 0].set_title('特征重要性')

        # 2. Predictions vs. actual values
        y_pred = model.predict(X)

        if model.model_type == 'regressor':
            # Regression: scatter plot with the identity line
            axes[0, 1].scatter(y, y_pred, alpha=0.5)
            axes[0, 1].plot([y.min(), y.max()], [y.min(), y.max()], 'r--')
            axes[0, 1].set_xlabel('实际值')
            axes[0, 1].set_ylabel('预测值')
            axes[0, 1].set_title('预测 vs 实际')

        else:
            # Classification: confusion matrix heatmap
            from sklearn.metrics import confusion_matrix
            cm = confusion_matrix(y, y_pred)
            sns.heatmap(cm, annot=True, fmt='d', ax=axes[0, 1])
            axes[0, 1].set_xlabel('预测类别')
            axes[0, 1].set_ylabel('实际类别')
            axes[0, 1].set_title('混淆矩阵')

        # 3. Performance metrics rendered as a table
        metrics = model.evaluate(X, y)
        metrics_df = pd.DataFrame({
            '指标': list(metrics.keys()),
            '值': list(metrics.values())
        })

        axes[1, 0].axis('off')
        axes[1, 0].table(
            cellText=metrics_df.values,
            colLabels=metrics_df.columns,
            loc='center',
            cellLoc='center'
        )
        axes[1, 0].set_title('性能指标')

        # 4. PCA projection (best-effort; failures are logged, not raised)
        try:
            from sklearn.decomposition import PCA
            if X.shape[1] > 2:
                pca = PCA(n_components=2)
                X_pca = pca.fit_transform(X)

                if model.model_type == 'classifier':
                    scatter = axes[1, 1].scatter(X_pca[:, 0], X_pca[:, 1], c=y, cmap='viridis', alpha=0.5)
                    axes[1, 1].set_title('PCA降维可视化')
                    plt.colorbar(scatter, ax=axes[1, 1])
                else:
                    scatter = axes[1, 1].scatter(X_pca[:, 0], X_pca[:, 1], c=y_pred, cmap='coolwarm', alpha=0.5)
                    axes[1, 1].set_title('PCA降维 + 预测值')
                    plt.colorbar(scatter, ax=axes[1, 1])
            else:
                axes[1, 1].text(0.5, 0.5, '数据维度不足，无法进行PCA可视化',
                          horizontalalignment='center', verticalalignment='center')
        except Exception as e:
            logging.warning(f"PCA可视化失败: {str(e)}")
            axes[1, 1].text(0.5, 0.5, f'可视化失败: {str(e)}',
                      horizontalalignment='center', verticalalignment='center')

        plt.tight_layout()
        return fig