# hyperparameter_tuning.py

import warnings
import os
import sys
import ctypes
import logging
import tensorflow as tf

# ===== Completely suppress all warnings and log output =====
def silence_all():
    """Suppress warnings, Python logging, and TensorFlow log output.

    Redirects the process-level stderr (fd 2) to /dev/null to silence
    TensorFlow's C++-layer logging. A duplicate of the original stderr fd
    is kept in the module-level ``_old_stderr`` (as before) and is now
    also returned, so a caller can restore output later with
    ``os.dup2(fd, 2)`` — a backward-compatible addition; the existing
    call site ignores the return value.

    Returns:
        int: duplicated file descriptor of the original stderr.

    NOTE(review): ``TF_CPP_MIN_LOG_LEVEL`` is set *after*
    ``import tensorflow`` at the top of this file, so it cannot suppress
    messages TensorFlow's C++ layer already emitted during import; move
    the assignment before the import to silence those too.
    """
    warnings.filterwarnings("ignore")
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    os.environ['PYTHONWARNINGS'] = "ignore"

    def _silence_stderr():
        # Point fd 2 at /dev/null and hand back a dup of the original fd
        # so stderr can be restored by the caller if needed.
        STDERR_FILENO = 2
        devnull = os.open(os.devnull, os.O_WRONLY)
        old_stderr = os.dup(STDERR_FILENO)
        os.dup2(devnull, STDERR_FILENO)
        os.close(devnull)
        return old_stderr

    global _old_stderr
    _old_stderr = _silence_stderr()

    # Drop every logging record at CRITICAL and below. The setLevel call
    # is redundant while disable() is in force, but kept so the level is
    # sane if logging.disable(logging.NOTSET) ever re-enables logging.
    logging.disable(logging.CRITICAL)
    logging.root.setLevel(logging.ERROR)

    # TensorFlow Python-level loggers.
    tf.get_logger().setLevel(logging.ERROR)
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)

    return _old_stderr


# Activate suppression immediately so the heavyweight imports below stay quiet.
silence_all()
# ===== Import remaining libraries =====
import pandas as pd
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error, r2_score
import joblib
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from scikeras.wrappers import KerasRegressor

# ===== DNN model builder =====
def create_dnn_model(input_shape, optimizer='adam', dropout_rate=0.3):
    """Build and compile a feed-forward DNN for a regression task.

    Args:
        input_shape: number of input features (an int).
        optimizer: Keras optimizer name or instance.
        dropout_rate: dropout fraction applied after the first hidden layer.

    Returns:
        A compiled Sequential model with a single linear output unit,
        MSE loss, and MAE as an extra metric.
    """
    model = Sequential()
    model.add(Dense(128, activation='relu', input_shape=(input_shape,)))
    model.add(Dropout(dropout_rate))
    model.add(Dense(64, activation='relu'))
    model.add(Dense(1))  # linear output layer (regression)

    model.compile(
        optimizer=optimizer,
        loss='mean_squared_error',
        metrics=['mae'],
    )
    return model


def _tune_random_forest(X_train, y_train):
    """Grid-search a RandomForestRegressor over tree-shape hyperparameters.

    Returns the fitted GridSearchCV result (5-fold CV, scored by negative
    MSE, all cores).
    """
    rf_model = RandomForestRegressor(random_state=42)

    rf_param_grid = {
        'n_estimators': [50, 100, 200],
        'max_depth': [None, 10, 20, 30],
        'min_samples_split': [2, 4, 6],
        'min_samples_leaf': [1, 2, 4],
        'max_features': ['sqrt', 'log2']
    }

    rf_grid_search = GridSearchCV(
        estimator=rf_model,
        param_grid=rf_param_grid,
        cv=5,
        scoring='neg_mean_squared_error',
        n_jobs=-1,
        verbose=0
    )
    return rf_grid_search.fit(X_train, y_train)


def _tune_dnn(X_train, y_train):
    """Grid-search the scikeras-wrapped DNN over training hyperparameters.

    Returns the fitted GridSearchCV result (3-fold CV, negative MSE).

    NOTE(review): n_jobs=-1 forks worker processes that each build their
    own TensorFlow graph; if this deadlocks or exhausts memory, fall back
    to n_jobs=1.
    """
    dnn_model = KerasRegressor(
        model=create_dnn_model,
        input_shape=X_train.shape[1],  # input dimension routed to create_dnn_model
        optimizer='adam',
        dropout_rate=0.3,
        verbose=0  # hide per-epoch training output
    )

    dnn_param_grid = {
        'batch_size': [32, 64],
        'epochs': [50, 100],
        'dropout_rate': [0.2, 0.3, 0.5],
        'optimizer': ['adam', 'rmsprop']
    }

    dnn_grid = GridSearchCV(
        estimator=dnn_model,
        param_grid=dnn_param_grid,
        cv=3,
        scoring='neg_mean_squared_error',
        n_jobs=-1,
        verbose=0  # hide search progress
    )
    return dnn_grid.fit(X_train, y_train)


if __name__ == "__main__":
    print("【开始】加载数据...")
    # Assumes the parquet file has an 'Average_Fare' target column and
    # numeric feature columns — TODO confirm against the upstream
    # feature-engineering step.
    df = pd.read_parquet("../data/features/feature_engineered_data.parquet")
    X = df.drop(columns=['Average_Fare'])
    y = df['Average_Fare']

    print(f" 数据形状：X={X.shape}, y={y.shape}")

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

    # ==============================
    # Random-forest hyperparameter tuning
    # ==============================
    print(" 开始随机森林超参数搜索...")
    rf_grid_result = _tune_random_forest(X_train, y_train)

    rf_best_model = rf_grid_result.best_estimator_
    y_pred_rf = rf_best_model.predict(X_test)
    mse_rf = mean_squared_error(y_test, y_pred_rf)
    r2_rf = r2_score(y_test, y_pred_rf)

    print("【随机森林】最佳参数组合：", rf_grid_result.best_params_)
    print("【随机森林】测试集 MSE：", mse_rf)
    print("【随机森林】测试集 R² Score：", r2_rf)

    os.makedirs("../data/models", exist_ok=True)
    joblib.dump(rf_best_model, "../data/models/best_random_forest.pkl")

    # ==============================
    # Deep-learning model tuning (TensorFlow via scikeras)
    # ==============================
    print(" 开始深度学习模型超参数搜索...")
    dnn_grid_result = _tune_dnn(X_train, y_train)

    dnn_best_model = dnn_grid_result.best_estimator_
    # ravel() flattens the (n, 1) prediction column to 1-D for the metrics.
    y_pred_dnn = dnn_best_model.predict(X_test).ravel()
    mse_dnn = mean_squared_error(y_test, y_pred_dnn)
    r2_dnn = r2_score(y_test, y_pred_dnn)

    print("【DNN】最佳参数组合：", dnn_grid_result.best_params_)
    print("【DNN】测试集 MSE：", mse_dnn)
    print("【DNN】测试集 R² Score：", r2_dnn)

    # Persist the underlying tf.keras model held by the scikeras wrapper.
    dnn_best_model.model_.save("../data/models/best_tensorflow_model.h5")

    # Save side-by-side predictions from both models.
    result_df = pd.DataFrame({
        "True_Value": y_test,
        "RF_Predicted_Value": y_pred_rf,
        "DNN_Predicted_Value": y_pred_dnn
    })
    output_path = "../data/output/predictions_with_two_models.parquet"
    os.makedirs(os.path.dirname(output_path), exist_ok=True)
    result_df.to_parquet(output_path)
    print(f" 已保存双模型预测结果到 {output_path}")
