import optuna
from optuna.samplers import TPESampler
import xgboost as xgb
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.linear_model import LogisticRegression
import numpy as np
import pandas as pd
from sklearn.ensemble import VotingClassifier
from sklearn.datasets import fetch_openml
from sklearn.preprocessing import LabelEncoder, StandardScaler

# Load the pre-split train/test CSVs.  The label is taken positionally below
# (first column of train.csv, last column of test2.csv).
data1 = pd.read_csv('./03_数据集/train.csv')
data2 = pd.read_csv('./03_数据集/test2.csv')

# Feature selection: the subset of raw columns used for modelling.
columns = ['Age', 'Department', 'DistanceFromHome', 'Education', 'EnvironmentSatisfaction', 'Gender', 'JobInvolvement',
           'JobLevel', 'JobRole', 'JobSatisfaction', 'MaritalStatus', 'MonthlyIncome', 'OverTime', 'PercentSalaryHike',
           'StockOptionLevel', 'WorkLifeBalance', 'YearsAtCompany', 'YearsSinceLastPromotion', 'RelationshipSatisfaction']

x1 = data1[columns].copy()
y_train = data1.iloc[:, 0].copy()   # assumes the label is train.csv's first column -- verify against the data
x2 = data2[columns].copy()
y_test = data2.iloc[:, -1].copy()   # assumes the label is test2.csv's last column -- verify against the data

# Derived ratio features; the +1 in the denominator guards against division
# by zero for employees in their first year at the company.
for frame in (x1, x2):
    frame['IncomePerYear'] = frame['MonthlyIncome'] / (frame['YearsAtCompany'] + 1)
    frame['PromotionRatio'] = frame['YearsSinceLastPromotion'] / (frame['YearsAtCompany'] + 1)

# One-hot encode the categorical columns.  (The original code ran
# pd.get_dummies twice on the same frames; the redundant second pass was
# removed.)  Encoding train and test separately can yield different column
# sets when a category value is absent from one split, so align the test
# frame to the train columns, filling missing dummy columns with 0.
X_train = pd.get_dummies(x1)
X_test = pd.get_dummies(x2)
X_train, X_test = X_train.align(X_test, join='left', axis=1, fill_value=0)

# Standardized copies, fit on train only to avoid test-set leakage.
# NOTE(review): the modelling code below trains on the *unscaled*
# X_train/X_test; the scaled arrays are kept for optional use.
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)

# Lightweight wrapper around sklearn's VotingClassifier to simplify ensembling.
class SimplifiedEnsemble:
    """Soft-voting ensemble of an XGBoost and a logistic-regression classifier.

    Exposes the minimal fit / predict / predict_proba surface of the
    underlying :class:`VotingClassifier`.
    """

    def __init__(self, xgb_params=None, lr_params=None):
        """Build the two base learners and the soft-voting wrapper.

        Args:
            xgb_params: keyword arguments for ``xgb.XGBClassifier``; when
                ``None``, a fixed-seed binary-logistic default is used.
            lr_params: keyword arguments for ``LogisticRegression``; when
                ``None``, a fixed-seed high-iteration default is used.
        """
        # Substitute the default configurations only when nothing was passed
        # (an explicitly supplied empty dict is honoured as-is).
        xgb_cfg = xgb_params if xgb_params is not None else {
            'objective': 'binary:logistic',
            'eval_metric': 'logloss',
            'random_state': 42
        }
        lr_cfg = lr_params if lr_params is not None else {
            'random_state': 808, 'max_iter': 1000000, 'C': 5.0, 'solver': 'lbfgs'
        }

        # Base learners.
        self.xgb_model = xgb.XGBClassifier(**xgb_cfg)
        self.lr_model = LogisticRegression(**lr_cfg)

        # 'soft' voting averages the predicted class probabilities of the
        # members instead of counting hard votes.
        members = [('xgb', self.xgb_model), ('lr', self.lr_model)]
        self.ensemble = VotingClassifier(estimators=members, voting='soft')

    def fit(self, X, y):
        """Fit both base learners on (X, y); returns self for chaining."""
        self.ensemble.fit(X, y)
        return self

    def predict(self, X):
        """Return hard class predictions from the soft-voting ensemble."""
        return self.ensemble.predict(X)

    def predict_proba(self, X):
        """Return the averaged class-probability matrix."""
        return self.ensemble.predict_proba(X)


# Optuna objective: train the ensemble with a trial's hyper-parameters and
# score it by test-set AUC (the study maximises this return value).
def objective_ensemble(trial):
    """Sample one hyper-parameter configuration and return its test AUC."""
    suggest_f, suggest_i = trial.suggest_float, trial.suggest_int

    # Search space for the XGBoost base learner.
    xgb_param = {
        'reg_alpha': suggest_f('reg_alpha', 1e-8, 10.0, log=True),
        'reg_lambda': suggest_f('reg_lambda', 1e-8, 10.0, log=True),
        'learning_rate': suggest_f('learning_rate', 1e-4, 0.1, log=True),
        'n_estimators': suggest_i('n_estimators', 1, 1000),
        'subsample': suggest_f('subsample', 0.3, 1.0),
        'colsample_bytree': suggest_f('colsample_bytree', 0.3, 1.0),
        # NOTE(review): 'random_state' is suggested under the same trial name
        # below with the same distribution, so Optuna hands both models one
        # shared value per trial.
        'random_state': suggest_i('random_state', 0, 999)
    }

    # Search space for the logistic-regression base learner.
    lr_params = {
        'C': suggest_f('C', 1e-4, 100.0, log=True),
        'max_iter': suggest_i('max_iter', 1000, 100000),
        'tol': suggest_f('tol', 1e-5, 1e-3, log=True),
        'class_weight': trial.suggest_categorical('class_weight', [None, 'balanced']),
        'random_state': suggest_i('random_state', 0, 999)
    }

    candidate = SimplifiedEnsemble(xgb_params=xgb_param, lr_params=lr_params)
    candidate.fit(X_train, y_train)

    # Score with the positive-class probability (column 1 of predict_proba).
    positive_proba = candidate.predict_proba(X_test)[:, 1]
    return roc_auc_score(y_test, positive_proba)

# Run TPE-based hyper-parameter search, maximising the AUC returned by
# objective_ensemble.  A fixed sampler seed makes the search reproducible.
sampler_ensemble = TPESampler(seed=55)
study_ensemble = optuna.create_study(direction='maximize', sampler=sampler_ensemble)
print("开始超参数优化（以AUC为指标）")
study_ensemble.optimize(objective_ensemble, n_trials=200)

# Report the best trial found by the study.
print("最佳AUC: {:.4f}".format(study_ensemble.best_value))
print("最佳参数:", study_ensemble.best_params)

# Retrain the final ensemble with the tuned hyper-parameters.
#
# BUG FIX: study_ensemble.best_params mixes the parameters of BOTH base
# learners.  The original code passed the whole dict as xgb_params, which
# handed XGBoost foreign keys such as 'C'/'tol' and silently left the
# logistic regression at its untuned defaults.  Split the dict by owner
# before constructing the model.
_LR_KEYS = {'C', 'max_iter', 'tol', 'class_weight'}
best_params = study_ensemble.best_params
best_lr_params = {k: v for k, v in best_params.items() if k in _LR_KEYS}
best_xgb_params = {k: v for k, v in best_params.items() if k not in _LR_KEYS}
# 'random_state' is suggested under one shared trial name in both search
# spaces, so give the tuned seed to the logistic regression as well.
if 'random_state' in best_params:
    best_lr_params['random_state'] = best_params['random_state']

best_model = SimplifiedEnsemble(xgb_params=best_xgb_params, lr_params=best_lr_params)
best_model.fit(X_train, y_train)

# Evaluate on the held-out test set.
y_pred = best_model.predict(X_test)
y_pred_proba = best_model.predict_proba(X_test)[:, 1]  # positive-class probability

final_acc = accuracy_score(y_test, y_pred)
final_auc = roc_auc_score(y_test, y_pred_proba)

print("最终模型准确率: {:.4f}".format(final_acc))
print("最终模型AUC值: {:.4f}".format(final_auc))