import lightgbm as lgb
import pandas as pd
import numpy as np
import joblib
from pathlib import Path
from featurework import prepare_data
from sklearn.model_selection import TimeSeriesSplit
import time
import sys
import os
import xgboost as xgb
from sklearn.metrics import mean_squared_error

# Fix the NumPy random seed for reproducible runs
np.random.seed(42)

# Project root is two directory levels above this file; models are
# persisted under <project_root>/model (created on import if absent).
PROJECT_ROOT = Path(__file__).resolve().parents[2]
MODEL_DIR = PROJECT_ROOT / "model"
MODEL_DIR.mkdir(parents=True, exist_ok=True)


def clean_inf_nan(df):
    """清理无穷大和NaN值"""
    df = df.replace([np.inf, -np.inf], np.nan)
    df = df.fillna(0)

    # 限制极大值 - 使用99.9%分位数作为上限
    for col in df.columns:
        if df[col].dtype.kind in 'f':  # 只处理浮点数列
            q99 = df[col].quantile(0.999)
            if q99 > 0:
                df[col] = df[col].clip(upper=q99 * 10)  # 允许最大10倍于99.9%分位数
    return df


def rank_loss(y_pred, dataset):
    """Custom LightGBM eval metric: mean squared rank disagreement.

    Converts predictions and labels to 0-based rank positions and returns
    the mean squared difference between the two rankings. The trailing
    ``False`` tells LightGBM that lower is better.
    """
    labels = dataset.get_label()

    def _to_ranks(values):
        # Double argsort maps raw scores to their rank positions.
        return np.argsort(np.argsort(values))

    diff = _to_ranks(y_pred) - _to_ranks(labels)
    return 'rank_loss', (diff ** 2).mean(), False


def _make_extreme_weights(y):
    """Build sample weights emphasizing extreme targets.

    The top and bottom 10% of target values get weight 2.0; everything
    else gets 1.0. Guarded for short inputs: when len(y) < 10 the decile
    is empty, because a ``[-0:]`` slice would otherwise select the WHOLE
    array and double every weight.
    """
    weights = np.ones(len(y))
    decile = len(y) // 10
    if decile > 0:
        order = np.argsort(y)
        weights[order[:decile]] = 2.0
        weights[order[-decile:]] = 2.0
    return weights


def _train_lgb_fold(X_tr, y_tr, X_val, y_val, weights, fold):
    """Train, persist (MODEL_DIR/lgb_model_fold<k>.pkl), and return one
    LightGBM regressor for the given fold."""
    print("训练LightGBM模型...")

    lgb_params = {
        'objective': 'regression',
        'metric': 'rmse',
        'boosting_type': 'gbdt',
        'num_leaves': 63,
        'learning_rate': 0.02,  # low learning rate; early stopping picks the round count
        'feature_fraction': 0.8,
        'bagging_fraction': 0.8,
        'bagging_freq': 5,
        'max_depth': 7,
        'min_data_in_leaf': 50,
        'lambda_l1': 0.3,
        'lambda_l2': 0.3,
        'verbosity': -1,
        'seed': 42
    }

    train_data = lgb.Dataset(X_tr, label=y_tr, weight=weights)
    val_data = lgb.Dataset(X_val, label=y_val, reference=train_data)

    model = lgb.train(
        lgb_params,
        train_data,
        num_boost_round=1000,
        valid_sets=[val_data],
        callbacks=[
            lgb.early_stopping(stopping_rounds=50, verbose=True),
            lgb.log_evaluation(period=100)
        ]
    )

    model_path = MODEL_DIR / f"lgb_model_fold{fold + 1}.pkl"
    joblib.dump(model, model_path)
    print(f"LightGBM模型保存至: {model_path}")
    return model


def _train_xgb_fold(X_tr, y_tr, X_val, y_val, weights, fold):
    """Train, persist (MODEL_DIR/xgb_model_fold<k>.pkl), and return one
    XGBoost regressor for the given fold."""
    print("训练XGBoost模型...")

    xgb_params = {
        'objective': 'reg:squarederror',
        'eval_metric': 'rmse',
        'eta': 0.02,
        'max_depth': 6,
        'subsample': 0.8,
        'colsample_bytree': 0.8,
        'seed': 42,
        'nthread': -1
    }

    # missing=np.inf: extra protection in case any inf survived cleaning.
    # (Bug fix: the original built these DMatrix objects twice and the
    # second construction silently dropped the `missing` setting.)
    dtrain = xgb.DMatrix(X_tr, label=y_tr, weight=weights, missing=np.inf)
    dval = xgb.DMatrix(X_val, label=y_val, missing=np.inf)

    model = xgb.train(
        xgb_params,
        dtrain,
        num_boost_round=1000,
        evals=[(dval, 'validation')],
        early_stopping_rounds=50,
        verbose_eval=100
    )

    model_path = MODEL_DIR / f"xgb_model_fold{fold + 1}.pkl"
    joblib.dump(model, model_path)
    print(f"XGBoost模型保存至: {model_path}")
    return model


def train_model():
    """Train per-fold LightGBM and XGBoost models on time-series splits.

    Loads features via prepare_data(), cleans inf/NaN, then trains one
    LightGBM and one XGBoost model per TimeSeriesSplit fold, persisting
    each to MODEL_DIR.

    Returns:
        tuple[list, list]: (lgb_models, xgb_models). Both lists are empty
        on failure — always a 2-tuple, so callers can unpack safely.
    """
    print("=" * 50)
    print("开始准备数据...")
    try:
        X_train, y_train, _, _ = prepare_data()
        print(f"训练数据形状: {X_train.shape}")
    except Exception as e:
        print(f"数据准备失败: {str(e)}")
        import traceback
        traceback.print_exc()
        # Bug fix: return a 2-tuple (not a bare list) so the caller's
        # `lgb_models, xgb_models = train_model()` cannot raise.
        return [], []

    if X_train.empty or y_train.empty:
        print("错误: 训练数据为空")
        return [], []

    print("开始模型训练...")

    tscv = TimeSeriesSplit(n_splits=3)
    lgb_models = []
    xgb_models = []
    for fold, (train_idx, val_idx) in enumerate(tscv.split(X_train)):
        print(f"\n训练第 {fold + 1}/3 折模型...")

        # Split fold data (time-ordered: validation always follows training).
        X_tr, X_val = X_train.iloc[train_idx], X_train.iloc[val_idx]
        y_tr, y_val = y_train.iloc[train_idx], y_train.iloc[val_idx]

        # Clean inf/NaN and cap extreme values before training.
        X_tr = clean_inf_nan(X_tr)
        X_val = clean_inf_nan(X_val)
        y_tr = clean_inf_nan(y_tr.to_frame()).iloc[:, 0]  # Series round-trip
        y_val = clean_inf_nan(y_val.to_frame()).iloc[:, 0]

        weights = _make_extreme_weights(y_tr)

        lgb_models.append(_train_lgb_fold(X_tr, y_tr, X_val, y_val, weights, fold))
        xgb_models.append(_train_xgb_fold(X_tr, y_tr, X_val, y_val, weights, fold))

    return lgb_models, xgb_models


if __name__ == "__main__":
    start_time = time.time()
    print("=" * 50)
    print("开始股票预测模型训练...")

    lgb_models, xgb_models = train_model()

    if lgb_models and xgb_models:
        print(f"成功训练 {len(lgb_models)} 个LightGBM模型和 {len(xgb_models)} 个XGBoost模型")
    else:
        print("模型训练失败")

    print(f"训练完成，耗时: {time.time() - start_time:.1f}秒")
    print("=" * 50)