import time
import optuna
from matplotlib import pyplot as plt
from optuna.samplers import TPESampler
import xgboost as xgb
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, roc_auc_score, precision_score, recall_score, f1_score, confusion_matrix, \
    roc_curve, precision_recall_curve
from sklearn.linear_model import LogisticRegression
import numpy as np
import pandas as pd
from sklearn.ensemble import VotingClassifier
from sklearn.datasets import fetch_openml
from sklearn.preprocessing import LabelEncoder, StandardScaler
import seaborn as sns
# Matplotlib: try a series of CJK-capable fonts so Chinese labels render.
plt.rcParams['font.sans-serif'] = ['SimHei', 'Microsoft YaHei', 'SimSun', 'FangSong', 'KaiTi']
plt.rcParams['axes.unicode_minus'] = False  # render '-' as a minus sign, not a box

# Pandas display options: show all columns on one wide line with CJK-aware alignment.
pd.set_option('display.max_columns', None)
# pd.set_option('display.max_rows', None)  # uncomment to also show all rows
pd.set_option('display.width', 1000)
pd.set_option('display.unicode.ambiguous_as_wide', True)
pd.set_option('display.unicode.east_asian_width', True)
# Max printed cell width (default 50). Use the canonical 'display.'-prefixed
# key: the bare 'max_colwidth' alias is deprecated in modern pandas.
pd.set_option('display.max_colwidth', 100)

data1 = pd.read_csv('../data/raw/train.csv')  # training split
data2 = pd.read_csv('../data/raw/test2.csv')  # held-out test split

# print(f'{data1.head()}\n{data2.head()}')
# data3.info()
# data4.info()
# Feature extraction
# Feature columns shared by both the train and test frames.
columns = ['Age','BusinessTravel', 'Department', 'DistanceFromHome', 'Education',
           'EnvironmentSatisfaction','Gender', 'JobInvolvement', 'JobLevel', 'JobRole',
           'JobSatisfaction','MaritalStatus', 'MonthlyIncome','NumCompaniesWorked',
           'OverTime', 'PercentSalaryHike','PerformanceRating', 'StockOptionLevel',
           'WorkLifeBalance', 'YearsAtCompany', 'YearsSinceLastPromotion', 'RelationshipSatisfaction']

x1 = data1[columns].copy()         # training features
y_train = data1.iloc[:, 0].copy()  # training target: FIRST column of train.csv
x2 = data2[columns].copy()         # test features
y_test = data2.iloc[:, -1].copy()  # test target: LAST column of test2.csv
# NOTE(review): the target is read from column 0 in train.csv but column -1 in
# test2.csv — presumably the two files place 'Attrition' differently; verify
# against the actual CSV headers.

# One-hot encode the categorical features. Encoding train and test separately
# can produce different column sets whenever a category value appears in only
# one of the two files, which would silently misalign (or crash) the scaler.
# Fix: reindex the test frame onto the training columns — dummies missing from
# the test split are filled with 0, test-only dummies are dropped.
X_train = pd.get_dummies(x1)
X_test = pd.get_dummies(x2)
X_test = X_test.reindex(columns=X_train.columns, fill_value=0)

# Standardize features. The scaler is fit on the training split only so no
# test-set statistics leak into preprocessing.
# NOTE(review): the scaled arrays are not used by the models below (they are
# trained on the unscaled dummies) — confirm whether that is intentional.
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)


# Simplified ensemble built on sklearn's VotingClassifier
class SimplifiedEnsemble:
    """Soft-voting ensemble of an XGBoost classifier and a logistic regression.

    Exposes the familiar ``fit`` / ``predict`` / ``predict_proba`` API and
    delegates everything to an internal :class:`VotingClassifier`.
    """

    def __init__(self, xgb_params=None, lr_params=None):
        """Build both base estimators and wire them into a soft-voting ensemble.

        xgb_params: optional keyword arguments for ``xgb.XGBClassifier``.
        lr_params: optional keyword arguments for ``LogisticRegression``.
        When a dict is omitted (None), a fixed-seed default is used.
        """
        if xgb_params is None:
            xgb_params = {'objective': 'binary:logistic',
                          'eval_metric': 'logloss',
                          'random_state': 42}
        if lr_params is None:
            lr_params = {'random_state': 42, 'max_iter': 100000,
                         'C': 10.0, 'solver': 'newton-cg'}

        # Base estimators are kept as attributes so they can be inspected later.
        self.xgb_model = xgb.XGBClassifier(**xgb_params)
        self.lr_model = LogisticRegression(**lr_params)

        # 'soft' voting averages the per-class probabilities of both models
        # instead of counting hard label votes.
        self.ensemble = VotingClassifier(
            estimators=[('xgb', self.xgb_model), ('lr', self.lr_model)],
            voting='soft',
        )

    def fit(self, X, y):
        """Fit both base models on (X, y); return self to allow chaining."""
        self.ensemble.fit(X, y)
        return self

    def predict(self, X):
        """Return hard class labels from the soft-vote ensemble."""
        return self.ensemble.predict(X)

    def predict_proba(self, X):
        """Return the averaged class-probability estimates."""
        return self.ensemble.predict_proba(X)


# Exploratory data analysis (the simplified Optuna objective is defined further below)

def ana_data(data):
    """Exploratory plots for the attrition dataset.

    Draws (1) a pie chart of overall attrition, (2) histograms of each
    feature, (3) KDE plots of numeric features split by attrition, and
    (4) count plots of categorical features split by attrition.
    Expects a DataFrame with an 'Attrition' column plus the HR feature
    columns listed below. Shows figures interactively; saves nothing.
    """
    # print(data.info())
    # 1. Overall attrition distribution (pie chart).
    a_data = data.copy()
    fig = plt.figure(figsize=(10, 10))
    ax1 = fig.add_subplot()
    status_count = a_data['Attrition'].value_counts()
    print(status_count)
    # NOTE(review): the labels assume value_counts() lists "stayed" (the
    # majority class) first — verify against the actual data.
    ax1.pie(status_count, labels=["未离职","离职"], autopct='%.2f%%')
    ax1.set_title("离职总体分布情况")

    # Features to histogram (19 entries → fits the 4x5 subplot grid below).
    numeric_features = ['Age', 'Department', 'DistanceFromHome', 'Education', 'EnvironmentSatisfaction', 'Gender',
               'JobInvolvement','JobLevel', 'JobRole', 'JobSatisfaction', 'MaritalStatus', 'MonthlyIncome', 'OverTime',
               'PercentSalaryHike', 'StockOptionLevel', 'WorkLifeBalance', 'YearsAtCompany', 'YearsSinceLastPromotion',
               'RelationshipSatisfaction']
    # numeric_features.remove('Attrition')
    plt.figure(figsize=(16, 12))
    for i, col in enumerate(numeric_features[:]):
        plt.subplot(4, 5, i + 1)
        sns.histplot(a_data[col], kde=True,color='green')
        plt.title(f'{col}分布')
    plt.tight_layout()
    # plt.savefig('特征的分布.png')
    plt.show()

    # 2. Relationships with attrition.
    # 2.1 Numeric features vs. attrition: per-class KDEs (common_norm=False
    # normalizes each class separately so shapes are comparable).
    numeric_features_num = ['Age',  'DistanceFromHome', 'Education', 'EnvironmentSatisfaction','JobInvolvement','JobLevel',  'JobSatisfaction', 'MonthlyIncome', 'PercentSalaryHike', 'StockOptionLevel', 'WorkLifeBalance', 'YearsAtCompany', 'YearsSinceLastPromotion',
               'RelationshipSatisfaction']
    plt.figure(figsize=(16, 12))
    for i,col in enumerate(numeric_features_num[:]):
        plt.subplot(4,5,i+1)
        sns.kdeplot(data = a_data,x=col, hue="Attrition",fill=True,common_norm=False, alpha=0.6, palette=['skyblue', 'orange'])
        plt.title(f'{col}与离职率的相关性分布')
        # NOTE(review): hard-coded legend labels assume a fixed hue order;
        # confirm they match seaborn's ordering of the Attrition values.
        plt.legend(title='Attrition', labels=['离职', '未离职'])
    plt.tight_layout()
    # plt.savefig('数值型特征与离职率相关性.png')
    plt.show()

    # 2.2 Categorical features vs. attrition: grouped count plots.
    categorical_features = ['BusinessTravel', 'Department', 'EducationField', 'Gender', 'JobRole', 'MaritalStatus', 'Over18', 'OverTime']
    plt.figure(figsize=(16, 15))  # enlarged canvas to fit more subplots
    for i, col in enumerate(categorical_features):
        plt.subplot(3, 3, i + 1)  # 3x3 grid holds up to 9 subplots
        sns.countplot(data=a_data, x=col, hue='Attrition')
        plt.title(f'{col}与离职率的相关性分布')
        plt.legend(title='Attrition', labels=['离职', '未离职'])
    plt.tight_layout()
    # plt.savefig('分类型特征与离职率相关性.png')
    plt.show()

def evaluate_model(model, X_test, y_test, X_train=None, y_train=None, roc_auc=None):
    """Plot a confusion matrix, ROC curve and precision-recall curve for
    ``model`` on the test split.

    model: fitted classifier exposing predict / predict_proba.
    roc_auc: optional precomputed AUC for the ROC legend; computed from the
        model's probabilities when omitted.
    X_train / y_train: accepted for interface compatibility but unused here.
    Side effects: shows figures and saves 'precision_recall_curve.png'.
    """
    # Hard predictions for the confusion matrix. Bug fix: the original
    # assigned to `y_pre` but read `y_pred`, which only worked by accident
    # through a module-level global of the same name.
    y_pred = model.predict(X_test)
    cm = confusion_matrix(y_test, y_pred)
    plt.figure(figsize=(8, 6))
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
    plt.title('混淆矩阵')
    plt.ylabel('真实标签')
    plt.xlabel('预测标签')
    # plt.savefig('混淆矩阵.png')
    plt.show()

    # Positive-class probabilities drive both threshold curves.
    y_pred_proba = model.predict_proba(X_test)[:, 1]
    if roc_auc is None:
        # Bug fix: formatting None with ':.2f' raised TypeError whenever the
        # caller did not pass roc_auc (as the call site in this file does).
        roc_auc = roc_auc_score(y_test, y_pred_proba)

    fpr, tpr, _ = roc_curve(y_test, y_pred_proba)
    plt.figure(figsize=(8, 6))
    plt.plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC曲线 (AUC = {roc_auc:.2f})')
    plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--', label='随机分类器')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('假正率 (False Positive Rate)')
    plt.ylabel('真正率 (True Positive Rate)')
    plt.title('ROC曲线')
    plt.legend(loc="lower right")
    plt.grid(True)

    precision, recall, _ = precision_recall_curve(y_test, y_pred_proba)

    plt.figure(figsize=(10, 8))
    plt.plot(recall, precision, color='blue', lw=2)
    plt.xlabel('召回率')
    plt.ylabel('精确率')
    plt.title('精确率-召回率曲线')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.savefig('precision_recall_curve.png')
    plt.show()
def objective_ensemble(trial):
    """Optuna objective: train the XGBoost+LR voting ensemble with the
    trial's suggested XGBoost hyper-parameters and return accuracy.

    Reads the module-level X_train / y_train / X_test / y_test.
    NOTE(review): the objective scores directly on the final test split, so
    the tuned accuracy is optimistically biased; a validation split or
    cross-validation would be sounder.
    """
    xgb_param = {
        'reg_alpha': trial.suggest_float('reg_alpha', 1e-8, 10.0, log=True),
        'reg_lambda': trial.suggest_float('reg_lambda', 1e-8, 10.0, log=True),
        'learning_rate': trial.suggest_float('learning_rate', 1e-4, 0.5, log=True),
        'n_estimators': trial.suggest_int('n_estimators', 1, 500),
        'subsample': trial.suggest_float('subsample', 0.6, 1.0),
        'colsample_bytree': trial.suggest_float('colsample_bytree', 0.6, 1.0),
        # Bug fix: the seed itself was tuned over 0-999, letting the search
        # overfit to lucky RNG draws; keep it fixed, as the original inline
        # "fixed seed" comment intended.
        'random_state': 42,
        # 'early_stopping_rounds': 50  # candidate early-stopping parameter
    }

    model = SimplifiedEnsemble(xgb_params=xgb_param)
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    return accuracy_score(y_test, y_pred)


# ---- Hyper-parameter search ----
sampler_ensemble = TPESampler(seed=50)  # seeded sampler for reproducible trials
study_ensemble = optuna.create_study(direction='maximize', sampler=sampler_ensemble)
print("开始超参数优化...")
study_ensemble.optimize(objective_ensemble, n_trials=400)  # lower n_trials to speed this up

# Search results.
print("最佳准确率: {:.4f}".format(study_ensemble.best_value))
print("最佳参数:", study_ensemble.best_params)

# ---- Final model trained with the best parameters ----
best_model = SimplifiedEnsemble(xgb_params=study_ensemble.best_params)
best_model.fit(X_train, y_train)

# Test-set metrics (predict_proba is required for AUC).
y_pred = best_model.predict(X_test)
y_pred_proba = best_model.predict_proba(X_test)[:, 1]  # positive-class probability
final_acc = accuracy_score(y_test, y_pred)
final_auc = roc_auc_score(y_test, y_pred_proba)
precision = precision_score(y_test, y_pred)
recall = recall_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)

# Overfitting check: compare train vs. test accuracy. (The original guarded
# this with `if X_train is not None`, which is always true at module level —
# the guard was dead code copied from a function signature.)
train_pred = best_model.predict(X_train)
train_accuracy = accuracy_score(y_train, train_pred)
print(f"\n训练集准确率: {train_accuracy:.4f}")
print(f"测试集准确率: {final_acc:.4f}")
if train_accuracy - final_acc > 0.1:
    print("⚠️ 模型可能存在过拟合")
else:
    print("✅ 模型泛化能力良好")

# Bug fix: pass the AUC explicitly — the original omitted roc_auc, and
# evaluate_model crashed formatting None into the ROC legend.
evaluate_model(best_model, X_test, y_test, X_train, y_train, roc_auc=final_auc)
print("测试集性能指标:")
print(f"精确率 (Precision): {precision:.4f}")
print(f"召回率 (Recall): {recall:.4f}")
print(f"F1 分数: {f1:.4f}")
print("最终模型准确率: {:.4f}".format(final_acc))
print("最终模型AUC值: {:.4f}".format(final_auc))
