#coding:utf-8

import pandas as pd
import numpy as np
from sklearn.model_selection import KFold, GridSearchCV
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, AdaBoostRegressor
from sklearn.linear_model import Ridge, Lasso, ElasticNet
from sklearn.svm import SVR
import lightgbm as lgb
import xgboost as xgb
import warnings
warnings.filterwarnings('ignore')

# Module-level mutable state for the trained stacking ensemble.
_stacking_state = {
    'trained_base_models': [],  # base estimators refitted on the full training set
    'trained_meta_model': None,  # meta-estimator fitted on out-of-fold predictions (None => average fallback)
    'valid_models': [],  # (name, estimator) pairs selected during training
    'best_params': {}  # best hyperparameters found per model name by grid search
}

def _tune_hyperparameters(model, param_grid, X, y):
    """
    Tune a model's hyperparameters with a lightweight grid search.

    Args:
        model: estimator to tune.
        param_grid: parameter grid passed to GridSearchCV.
        X: training features.
        y: target values.

    Returns:
        Tuple of (best estimator, best parameter dict). On any failure the
        original model and an empty dict are returned instead.
    """
    # Keep the search cheap: 2-fold CV and a single worker to limit memory use.
    try:
        search = GridSearchCV(
            estimator=model,
            param_grid=param_grid,
            cv=2,
            scoring='r2',
            n_jobs=1,
            verbose=0,
        )
        search.fit(X, y)
    except Exception as e:
        # Best-effort: tuning failure falls back to the untuned model.
        print(f"超参数调优失败: {e}，使用默认参数")
        return model, {}
    return search.best_estimator_, search.best_params_

def _get_param_grids(model_name):
    """
    获取各模型的参数网格
    """
    param_grids = {
        'rf': {
            'n_estimators': [50, 100, 200],
            'max_depth': [None, 10, 20],
            'min_samples_split': [2, 5, 10]
        },
        'gb': {
            'n_estimators': [50, 100, 200],
            'learning_rate': [0.01, 0.05, 0.1],
            'max_depth': [3, 5, 7]
        },
        'ridge': {
            'alpha': [0.1, 1.0, 10.0]
        },
        'lasso': {
            'alpha': [0.01, 0.1, 1.0]
        },
        'svr': {
            'C': [0.1, 1.0, 10.0],
            'gamma': ['scale', 'auto']
        },
        'lgbm': {
            'n_estimators': [50, 100, 200],
            'learning_rate': [0.01, 0.05, 0.1],
            'num_leaves': [31, 63, 127]
        },
        'xgb': {
            'n_estimators': [50, 100, 200],
            'learning_rate': [0.01, 0.05, 0.1],
            'max_depth': [3, 5, 7]
        }
    }
    return param_grids.get(model_name, {})

def get_default_base_models(X=None, y=None):
    """
    Build the default list of base models, with hyperparameters adapted to
    the size and dimensionality of the training data.

    Args:
        X: training features (optional); used to derive sample/feature counts.
        y: target values (optional; currently unused, kept for interface symmetry).

    Returns:
        List of (name, estimator) tuples with data-adapted hyperparameters.
    """
    # Derive data dimensions; both default to 0 when X is absent/unrecognized.
    n_samples = 0
    n_features = 0

    if X is not None:
        if hasattr(X, 'shape'):
            n_samples, n_features = X.shape if len(X.shape) >= 2 else (len(X), 1)
        elif hasattr(X, '__len__') and hasattr(X, 'columns'):
            n_samples = len(X)
            n_features = len(X.columns)

    print(f"根据数据特征调整模型超参数: 样本数={n_samples}, 特征数={n_features}")

    # Scale model capacity with sample count.
    if n_samples > 1000:
        # Large dataset: fewer trees, larger learning rate, shallow models
        # to control overfitting and runtime.
        tree_estimators = 50
        learning_rate = 0.1
        complexity_control = 5  # small depth / leaf budget
    elif n_samples > 500:
        # Medium dataset: balanced complexity.
        tree_estimators = 100
        learning_rate = 0.05
        complexity_control = 8
    else:
        # Small dataset: allow more model complexity.
        tree_estimators = 200
        learning_rate = 0.01
        complexity_control = 15

    # Scale regularization strength with feature count.
    if n_features > 50:
        # High-dimensional data: stronger regularization.
        reg_alpha = 0.1
        svr_C = 0.1
    elif n_features > 20:
        # Medium dimensionality: moderate regularization.
        reg_alpha = 0.01
        svr_C = 1.0
    else:
        # Low dimensionality: weak regularization.
        reg_alpha = 0.001
        svr_C = 10.0

    return [
        # Random forest - bagging ensemble of decision trees.
        ('rf', RandomForestRegressor(
            random_state=42,
            n_estimators=tree_estimators,
            max_depth=complexity_control if complexity_control <= 30 else None,
            # Bug fix: sklearn requires an int min_samples_split >= 2; the
            # previous value of 1 for small datasets raised ValueError at fit.
            min_samples_split=2,
            min_samples_leaf=1
        )),
        # Ridge - L2-regularized linear regression.
        ('ridge', Ridge(
            random_state=42,
            alpha=reg_alpha * 10,  # Ridge typically needs a larger alpha
            max_iter=1000
        )),
        # Lasso - L1-regularized linear regression.
        ('lasso', Lasso(
            random_state=42,
            alpha=reg_alpha,
            max_iter=1000,
            tol=0.001
        )),
        # LightGBM - efficient gradient-boosted trees, suited to larger data.
        ('lgbm', lgb.LGBMRegressor(
            random_state=42,
            n_estimators=tree_estimators,
            learning_rate=learning_rate,
            num_leaves=2 ** min(complexity_control, 7),  # cap leaf count
            max_depth=complexity_control if complexity_control > 0 else -1,
            subsample=0.8 if n_samples > 100 else 1.0,
            colsample_bytree=0.8 if n_features > 10 else 1.0
        )),
        # Gradient boosting - residual-based boosting ensemble.
        ('gb', GradientBoostingRegressor(
            random_state=42,
            n_estimators=tree_estimators,
            learning_rate=learning_rate,
            max_depth=complexity_control // 3  # GBR is more depth-sensitive
        )),
        # AdaBoost - adaptive boosting focused on poorly-predicted samples.
        ('adaboost', AdaBoostRegressor(
            random_state=42,
            n_estimators=tree_estimators,
            learning_rate=learning_rate * 2  # AdaBoost usually needs a larger rate
        )),
        # ElasticNet - combined L1/L2-regularized linear regression.
        ('elasticnet', ElasticNet(
            random_state=42,
            alpha=reg_alpha,
            l1_ratio=0.5,  # balance L1 vs L2 penalty
            max_iter=1000,
            tol=0.001
        )),
        # Support vector regression with an RBF kernel.
        ('svr', SVR(
            C=svr_C,
            gamma='scale',
            kernel='rbf',
            epsilon=0.01 if n_samples > 100 else 0.001
        )),
        # XGBoost - optimized gradient-boosted trees.
        ('xgb', xgb.XGBRegressor(
            random_state=42,
            n_estimators=tree_estimators,
            learning_rate=learning_rate,
            max_depth=complexity_control // 2,
            subsample=0.8 if n_samples > 100 else 1.0,
            colsample_bytree=0.8 if n_features > 10 else 1.0,
            reg_alpha=reg_alpha,
            reg_lambda=reg_alpha * 2
        ))
    ]

def train_stacking_model(X, y, base_models=None, meta_model=None, n_splits=5):
    """
    Train a stacking ensemble and store it in the module-level _stacking_state.

    Each base model is scored with out-of-fold (K-fold) predictions; models
    whose average R2 exceeds 0.6 are kept, refit on the full training set,
    and their out-of-fold predictions become the meta-model's features.

    Args:
        X: training features (DataFrame or ndarray).
        y: target values (Series or ndarray).
        base_models: list of (name, estimator) tuples; defaults to
            get_default_base_models(X, y).
        meta_model: meta estimator; defaults to Ridge.
        n_splits: requested number of CV folds (clamped to the data size).

    Returns:
        The _stacking_state dict containing the trained models.
    """
    # Reset module state for a fresh training run.
    _stacking_state['trained_base_models'] = []
    _stacking_state['trained_meta_model'] = None
    _stacking_state['valid_models'] = []
    _stacking_state['best_params'] = {}

    print("开始训练Stacking模型...")

    if base_models is None:
        # Pass the data through so hyperparameters adapt to it.
        base_models = get_default_base_models(X, y)

    if meta_model is None:
        # Default meta-model: L2-regularized linear regression.
        meta_model = Ridge(random_state=42)

    # Normalize to numpy arrays for index-based fold slicing.
    if isinstance(X, pd.DataFrame):
        X = X.values
    if isinstance(y, pd.Series):
        y = y.values

    # Bug fix: clamp fold count to at least 2 — KFold raises for n_splits < 2,
    # which len(X)//2 could previously produce on tiny datasets.
    kf = KFold(n_splits=max(2, min(n_splits, len(X) // 2)), shuffle=True, random_state=42)

    # Out-of-fold predictions of the selected base models (meta-model input).
    meta_features = np.zeros((X.shape[0], 0))

    # Invariant: all_scores[i] always corresponds to all_models[i].
    all_scores = []
    all_models = []

    for name, model in base_models:
        print(f"\n训练基础模型: {name}")

        try:
            # Tune hyperparameters when a grid is defined for this model.
            param_grid = _get_param_grids(name)
            if param_grid:
                print(f"正在调整{name}的超参数...")
                tuned_model, best_params = _tune_hyperparameters(model, param_grid, X, y)
                _stacking_state['best_params'][name] = best_params
                print(f"{name}最佳参数: {best_params}")
            else:
                tuned_model = model

            # Out-of-fold predictions for this model.
            model_predictions = np.zeros_like(y, dtype=np.float64)

            fold_scores = []
            for fold, (train_idx, val_idx) in enumerate(kf.split(X)):
                X_train_fold, X_val_fold = X[train_idx], X[val_idx]
                y_train_fold, y_val_fold = y[train_idx], y[val_idx]

                try:
                    # Fresh clone per fold so folds never share fitted state.
                    fold_model = tuned_model.__class__(**tuned_model.get_params())
                    fold_model.fit(X_train_fold, y_train_fold)

                    y_val_pred = fold_model.predict(X_val_fold)
                    model_predictions[val_idx] = y_val_pred

                    fold_score = r2_score(y_val_fold, y_val_pred)
                    fold_scores.append(fold_score)
                    print(f"  折 {fold+1} R2分数: {fold_score:.4f}")
                except Exception as e:
                    print(f"  折 {fold+1} 训练失败: {e}")
                    fold_scores.append(0)

            avg_score = np.mean(fold_scores)
            print(f"{name}平均R2分数: {avg_score:.4f}")

            all_scores.append(avg_score)
            all_models.append((name, tuned_model, model_predictions))

        except Exception as e:
            print(f"{name}模型训练失败: {e}")
            # Bug fix: append a placeholder so all_models stays index-aligned
            # with all_scores. Previously only all_scores grew here, so a
            # failed model shifted the indices and later lookups
            # (all_models[i] / all_models[best_idx]) picked the wrong model.
            all_scores.append(-1)  # -1 marks a failed model
            all_models.append((name, None, None))

    # Keep every model whose out-of-fold R2 exceeds 0.6 (failed models score -1).
    for i, score in enumerate(all_scores):
        if score > 0.6:
            name, model, predictions = all_models[i]
            print(f"{name}模型准确率超过0.6，保留用于Stacking")
            _stacking_state['valid_models'].append((name, model))
            meta_features = np.column_stack((meta_features, predictions))

            # Refit the base model on the full training set for inference.
            final_model = model.__class__(**model.get_params())
            final_model.fit(X, y)
            _stacking_state['trained_base_models'].append(final_model)

    # Fallback: no model beat 0.6 — keep the single best-scoring one.
    if not _stacking_state['valid_models'] and len(all_scores) > 0:
        best_idx = np.argmax(all_scores)
        best_score = all_scores[best_idx]
        if best_score >= 0:
            print(f"所有模型准确率都低于0.6，使用表现最好的模型 (分数: {best_score:.4f})")
            name, model, predictions = all_models[best_idx]
            _stacking_state['valid_models'].append((name, model))
            meta_features = predictions.reshape(-1, 1)

            final_model = model.__class__(**model.get_params())
            final_model.fit(X, y)
            _stacking_state['trained_base_models'].append(final_model)
        else:
            print("所有模型训练失败，使用默认的Ridge模型")
            # Last resort: a plain Ridge model trained on everything.
            default_model = Ridge(random_state=42)
            default_model.fit(X, y)
            _stacking_state['trained_base_models'].append(default_model)
            meta_features = default_model.predict(X).reshape(-1, 1)

    print("\n训练元模型...")
    try:
        _stacking_state['trained_meta_model'] = meta_model.__class__(**meta_model.get_params())
        _stacking_state['trained_meta_model'].fit(meta_features, y)

        # Report overall ensemble performance on the training data.
        final_predictions = predict_stacking_model(X)
        final_score = r2_score(y, final_predictions)
        print(f"Stacking模型最终R2分数: {final_score:.4f}")
    except Exception as e:
        print(f"元模型训练失败: {e}，使用直接平均预测")
        # With no meta-model, prediction falls back to averaging base models.
        _stacking_state['trained_meta_model'] = None

    return _stacking_state

def predict_stacking_model(X):
    """
    Predict with the trained stacking ensemble held in _stacking_state.

    Args:
        X: input features (DataFrame or array-like).

    Returns:
        ndarray of predictions; an all-zero array when no base model
        can produce a prediction.
    """
    if isinstance(X, pd.DataFrame):
        X = X.values

    # Sanitize input: estimators must never see NaN or infinities.
    X = np.nan_to_num(X)

    # Collect predictions from every base model that succeeds.
    base_predictions = []
    for estimator in _stacking_state['trained_base_models']:
        try:
            base_predictions.append(estimator.predict(X))
        except Exception as e:
            print(f"基模型预测失败: {e}")

    # No usable base model: fall back to zeros.
    if not base_predictions:
        return np.zeros(X.shape[0])

    trained_meta = _stacking_state['trained_meta_model']
    if trained_meta is None:
        # No meta-model was trained: average the base predictions.
        return np.mean(base_predictions, axis=0)

    try:
        # Stack base predictions column-wise as the meta-model's features.
        return trained_meta.predict(np.column_stack(base_predictions))
    except Exception:
        # Meta-model failed at predict time: average instead.
        return np.mean(base_predictions, axis=0)

# 加载Excel数据的函数
def load_excel_data(file_path):
    """
    Load data from an Excel file, trying multiple engines.

    Args:
        file_path: path to the Excel file.

    Returns:
        DataFrame with the file contents.

    Raises:
        Exception: if no engine can read the file (the error is printed
        before being re-raised).
    """
    print(f"正在加载数据: {file_path}")
    try:
        # Try each engine in turn: openpyxl for .xlsx, xlrd for legacy .xls.
        for engine in ['openpyxl', 'xlrd']:
            try:
                df = pd.read_excel(file_path, engine=engine)
                print(f"数据加载成功，形状: {df.shape}")
                return df
            # Bug fix: narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and made the loop uninterruptible.
            except Exception:
                continue
        # Every engine failed.
        raise Exception("无法加载Excel文件")
    except Exception as e:
        print(f"读取文件失败: {e}")
        raise

# 数据预处理函数
def preprocess_data(df, target_col=None):
    """
    Clean raw data and optionally split it into features and target.

    Keeps only numeric columns, fills missing values with 0, and replaces
    +/-inf with 0.

    Args:
        df: raw data (DataFrame, or anything convertible to one).
        target_col: target column name. When given, that column becomes the
            target (falling back to the last numeric column if the name is
            not present, matching the old behavior). When None, the function
            runs in predict mode.

    Returns:
        Training mode (target_col is not None): (features, target).
        Predict mode (target_col is None): features only.
    """
    print("数据预处理开始...")

    # Normalize input to a DataFrame.
    if not isinstance(df, pd.DataFrame):
        df = pd.DataFrame(df)

    # Work on a copy; stringify column labels for uniform lookups.
    data = df.copy()
    data.columns = [str(col) for col in data.columns]

    # Keep numeric columns only; estimators cannot consume object columns.
    numeric_cols = data.select_dtypes(include=[np.number]).columns
    data = data[numeric_cols]

    # Sanitize: no NaN or infinities may reach the estimators.
    data = data.fillna(0).replace([np.inf, -np.inf], 0)

    # Training mode - return features and target.
    if target_col is not None:
        if len(data.columns) == 0:
            print("警告: 没有数值列可用，创建空特征和目标")
            return pd.DataFrame(), pd.Series(dtype=float)

        target_name = str(target_col)
        if target_name in data.columns:
            # Bug fix: honor the requested target column. Previously
            # target_col was ignored and the last column was always used.
            target = data[target_name]
            features = data.drop(columns=[target_name])
        elif len(data.columns) > 1:
            # Fallback (old behavior): last column is the target.
            features = data.iloc[:, :-1]
            target = data.iloc[:, -1]
        else:
            # A single numeric column becomes the target; no features remain.
            features = pd.DataFrame(index=data.index)
            target = data.iloc[:, 0]

        print(f"预处理完成: 特征数={len(features.columns)}, 样本数={len(features)}")
        return features, target

    # Predict mode - return features only.
    print(f"预处理完成: 特征数={len(data.columns)}, 样本数={len(data)}")
    return data