# import datetime
# import numpy as np
# import pandas as pd
# import seaborn as sns
# import matplotlib.pyplot as plt
# from sklearn.metrics import roc_auc_score
# from sklearn.model_selection import train_test_split
# from sklearn.preprocessing import StandardScaler
#
# from utils.common import data_preprocessing
# from utils.log import Logger
# from sklearn.linear_model import LogisticRegression
# import xgboost as xgb
# import joblib
# from sklearn.feature_selection import mutual_info_classif
# plt.rcParams['font.family'] = 'SimHei'
# plt.rcParams['font.size'] = 15
#
#
# class PowerLoadModel:
#     def __init__(self, path):
#         self.path = path
#         logfile_name = 'train_'+datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')+'.pkl'
#         self.logfile =  Logger('../',logfile_name).get_logger()
#         self.logfile.info(f'开始创建电力负荷模型类的对象了..')
#         self.data_source = data_preprocessing(path)
# # df = pd.read_csv('../data/train.csv')
# # df = df.drop(['EmployeeNumber', 'Over18', 'StandardHours'], axis=1)
#
# def  feature_engineering(data,logger):
#     data = data.copy()
#     # 训练集
#     X_train = data.drop('Attrition', axis=1)
#     y = data['Attrition']
#     # 识别非数值型的列（即分类特征）
#     categorical_features = X_train.select_dtypes(include=['object']).columns
#     # 对分类特征进行独热编码
#     X_train_encoded = pd.get_dummies(X_train[categorical_features])
#     # 接下来，我们需要合并独热编码的特征回原始的数值型特征中
#     # 首先，识别数值型的列
#     numerical_features = X_train.select_dtypes(exclude=['object']).columns
#     # 选择数值型特征
#     X_train_numerical = X_train[numerical_features]
#     # 合并数值型特征和独热编码的特征
#     X_train = pd.concat([X_train_numerical, X_train_encoded], axis=1)
#     # 现在 X_train_encoded 和 X_test_encoded 包含了独热编码后的特征
#
#     # 计算特征集 X_train 和目标变量 y_train 之间的互信息
#     mutual_info = mutual_info_classif(X_train, y)
#     #sorted_mutual_info = mutual_info.sort_values(ascending=False)
#     # 互信息数组转换成一个Pandas Series 对象
#     mutual_info = pd.Series(mutual_info)
#     # 将 Series 对象的索引设置为 X_train 的列名
#     mutual_info.index = X_train.columns
#     # 对 Series 对象中的互信息值进行降序排序
#     mutual_info.sort_values(ascending=True)
#     plt.title("Feature Importance", fontsize=10)
#     mutual_info.sort_values().plot(kind='barh', figsize=(12, 9), color='r', fontsize=10)
#     plt.show()
#     sorted_mutual_info = mutual_info.sort_values(ascending=False)
#     # 获取互信息值最低的20个特征的索引（列名）
#     least_important_feature_indices = sorted_mutual_info.tail(17).index
#     # 从new_df中删除这些特征
#     X_train_drop = X_train.drop(columns=least_important_feature_indices)
#     print(X_train_drop.info())
#     print('-' * 35)
#     print(y.info())
#     return  X_train_drop, y
#     # 接下来是模型训练和评估的代码...
# # XGBoost模型的交叉验证网格搜索
# # 数据分割
# def model_train(X_train_drop,y,logger):
#     x_train, x_test, y_train, y_test = train_test_split(X_train_drop, y, test_size=0.2, random_state=119)
#     # 第一阶段：训练XGBoost模型
#     xgb_model = xgb.XGBClassifier(
#         learning_rate=0.15,
#         max_depth=2,
#         min_child_weight=11,
#         n_estimators=150,
#         subsample=0.7,
#         random_state=29
#     )
#
#     xgb_model.fit(x_train, y_train)
#
#     # 获取XGBoost的输出作为特征
#     x_train_xgb = xgb_model.predict_proba(x_train)[:, 1].reshape(-1, 1)
#     x_test_xgb = xgb_model.predict_proba(x_test)[:, 1].reshape(-1, 1)
#
#     # 可以选择添加原始特征，增强模型能力
#     x_train_combined = np.hstack([x_train_xgb, x_train])
#     x_test_combined = np.hstack([x_test_xgb, x_test])
#
#     # 数据标准化 (逻辑回归对特征尺度敏感)
#     scaler = StandardScaler()
#     x_train_scaled = scaler.fit_transform(x_train_combined)
#     x_test_scaled = scaler.transform(x_test_combined)
#
#     # 第二阶段：训练逻辑回归模型
#     lr_model = LogisticRegression(
#         C=1.0,
#         penalty='l2',
#         solver='liblinear',
#         random_state=29
#     )
#
#     lr_model.fit(x_train_scaled, y_train)
#
#     # 预测和评估
#     y_pred_lr = lr_model.predict(x_test_scaled)
#     y_pred_prob_lr = lr_model.predict_proba(x_test_scaled)[:, 1]
#     auc_lr = roc_auc_score(y_test, y_pred_prob_lr)
#
#     # 单独评估XGBoost模型
#     y_pred_xgb = xgb_model.predict(x_test)
#     y_pred_prob_xgb = xgb_model.predict_proba(x_test)[:, 1]
#     auc_xgb = roc_auc_score(y_test, y_pred_prob_xgb)
#
#     print(f'XGBoost AUC: {auc_xgb}')
#     print(f'集成模型 AUC: {auc_lr}')
#     print(f'提升: {auc_lr - auc_xgb:.6f}')
#
#     # 模型的保存
#     joblib.dump(xgb_model, '../model/xgb_model.pkl')
#
#
# def plot_feature_importance(self, mutual_info):
#     """绘制特征重要性图并保存"""
#     plt.figure(figsize=(12, 9))
#     plt.title("特征重要性", fontsize=15)
#     mutual_info.sort_values().plot(kind='barh', color='r', fontsize=10)
#     plt.tight_layout()
#
#     # 保存图片
#     save_path = '../data/fig/feature_importance.png'
#     plt.savefig(save_path)
#     self.logfile.info(f"特征重要性图已保存至: {save_path}")
#     plt.close()
# if __name__ == '__main__':
#     pm = PowerLoadModel('../data/train.csv')
#     X_train_drop, y = feature_engineering(pm.data_source, pm.logfile)
#     print(y)
#     model_train(X_train_drop,y, pm.logfile)



#--------------------------------------------
import datetime
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import logging
import os
import joblib
from sklearn.feature_selection import mutual_info_classif
import xgboost as xgb
from sklearn.utils import class_weight
from sklearn.linear_model import LogisticRegression

# Global matplotlib style.  SimHei provides CJK glyphs so the Chinese figure
# titles/labels used below render correctly (presumably instead of empty
# boxes under the default font — depends on the host having SimHei installed).
plt.rcParams['font.family'] = 'SimHei'
plt.rcParams['font.size'] = 15

class PowerLoadModel:
    """Holds the training data frame and a per-run file logger.

    Reads the CSV at ``path``, drops identifier/constant columns, and exposes
    the cleaned frame as ``self.data_source``; ``self.logfile`` is a
    timestamp-named file logger under ``../logs``.
    """

    # Columns removed up front: EmployeeNumber is a row identifier; Over18 and
    # StandardHours carry no signal here (dropped by the original code too).
    _DROP_COLUMNS = ['EmployeeNumber', 'Over18', 'StandardHours']

    def __init__(self, path):
        self.path = path
        # Create the logger before loading data so load problems can be
        # recorded as well (the original created it after the read).
        self.logfile = self.setup_logger()
        self.data_source = pd.read_csv(self.path)
        self.data_source = self.data_source.drop(self._DROP_COLUMNS, axis=1)

    def setup_logger(self):
        """Return a named logger writing to ../logs/train_<timestamp>.log.

        Uses an explicit FileHandler on a named logger rather than
        ``logging.basicConfig``: basicConfig mutates the *root* logger
        globally and silently does nothing if the root logger is already
        configured, so a second PowerLoadModel instance would log nowhere.
        """
        log_dir = '../logs'
        os.makedirs(log_dir, exist_ok=True)
        log_file = f'{log_dir}/train_{datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}.log'
        logger = logging.getLogger(f'{__name__}.PowerLoadModel')
        logger.setLevel(logging.INFO)
        # Guard against stacking duplicate handlers when several instances
        # are created in one process.
        if not logger.handlers:
            handler = logging.FileHandler(log_file, encoding='utf-8')
            handler.setFormatter(
                logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
            logger.addHandler(handler)
        return logger

def feature_engineering(data, logger, n_drop=17):
    """One-hot encode categorical columns and prune the least informative features.

    Parameters
    ----------
    data : pandas.DataFrame or None
        Raw training frame containing an 'Attrition' target column.
    logger : logging.Logger
        Destination for progress/error messages.
    n_drop : int, optional
        Number of lowest-mutual-information features to discard.  Default 17
        keeps the behavior of the original hard-coded value.

    Returns
    -------
    tuple
        ``(X_pruned, y)`` — pruned feature frame and target series — or
        ``(None, None)`` when ``data`` is None.
    """
    if data is None:
        logger.error("输入数据为 None，无法进行特征工程")
        return None, None

    data = data.copy()
    X = data.drop('Attrition', axis=1)
    y = data['Attrition']

    # One-hot encode object-typed (categorical) columns; numeric columns
    # pass through unchanged.
    categorical_cols = X.select_dtypes(include=['object']).columns
    numeric_cols = X.select_dtypes(exclude=['object']).columns
    X = pd.concat([X[numeric_cols], pd.get_dummies(X[categorical_cols])], axis=1)

    # Rank features by mutual information with the target.  Fixed seed so the
    # ranking — and therefore the dropped feature set — is reproducible.
    mutual_info = mutual_info_classif(X, y, random_state=76)
    mutual_info = pd.Series(mutual_info, index=X.columns).sort_values(ascending=False)

    # Persist the importance chart to disk (no blocking plt.show()).
    mutual_info.sort_values().plot(kind='barh', figsize=(12, 9), color='r', fontsize=10)
    plt.title("Feature Importance", fontsize=15)
    plt.tight_layout()
    plt.savefig('feature_importance.png')
    plt.close()

    # Drop the n_drop least informative columns (tail of the descending sort).
    X_pruned = X.drop(columns=mutual_info.tail(n_drop).index)
    logger.info("特征工程完成")
    return X_pruned, y

def model_train(X_train_drop, y):
    """Train a two-stage XGBoost -> logistic-regression stacked model.

    Stage 1 fits a sample-weighted XGBoost classifier; its positive-class
    probability is prepended to the original features, the combined matrix is
    standardized (train statistics only), and a logistic regression is fit on
    top (stage 2).  Both fitted models are persisted under ``../model``.

    Parameters
    ----------
    X_train_drop : pandas.DataFrame
        Pruned feature matrix from feature_engineering().
    y : pandas.Series
        Binary target.
    """
    x_train, x_test, y_train, y_test = train_test_split(
        X_train_drop, y, test_size=0.2, random_state=119)

    xgb_model = xgb.XGBClassifier(
        reg_alpha=0.3,       # L1 regularization
        reg_lambda=2.00,     # L2 regularization
        learning_rate=0.15,
        max_depth=2,
        min_child_weight=11,
        n_estimators=150,
        subsample=0.7,
        random_state=29,
    )

    # Balance the classes via per-sample weights.  BUG FIX: the weights were
    # previously computed but never passed to fit(), so they had no effect on
    # training and were only printed at the end.
    sample_w = class_weight.compute_sample_weight("balanced", y_train)
    xgb_model.fit(x_train, y_train, sample_weight=sample_w)

    # Stage-1 output (positive-class probability) becomes an extra feature
    # alongside the original columns.
    x_train_xgb = xgb_model.predict_proba(x_train)[:, 1].reshape(-1, 1)
    x_test_xgb = xgb_model.predict_proba(x_test)[:, 1].reshape(-1, 1)
    x_train_combined = np.hstack([x_train_xgb, x_train])
    x_test_combined = np.hstack([x_test_xgb, x_test])

    # Logistic regression is scale-sensitive; standardize using train-set
    # statistics only to avoid test-set leakage.
    scaler = StandardScaler()
    x_train_scaled = scaler.fit_transform(x_train_combined)
    x_test_scaled = scaler.transform(x_test_combined)

    lr_model = LogisticRegression(
        penalty='l2',
        C=1.0,
        solver='liblinear',
        random_state=29,
    )
    lr_model.fit(x_train_scaled, y_train)

    # Evaluate both stages on the held-out split.
    y_pred_prob_xgb = xgb_model.predict_proba(x_test)[:, 1]
    auc_xgb = roc_auc_score(y_test, y_pred_prob_xgb)
    y_pred_prob_lr = lr_model.predict_proba(x_test_scaled)[:, 1]
    auc_lr = roc_auc_score(y_test, y_pred_prob_lr)

    print(f'XGBoost AUC: {auc_xgb:.4f}')
    print(f'集成模型 AUC: {auc_lr:.4f}')
    print(f'提升: {auc_lr - auc_xgb:.6f}')
    print(f'样本权重：{sample_w}')

    os.makedirs('../model', exist_ok=True)
    joblib.dump(xgb_model, '../model/xgb_model.pkl')
    joblib.dump(lr_model, '../model/lr_model.pkl')

if __name__ == '__main__':
    # End-to-end run: load/clean data, engineer features, then train.
    model = PowerLoadModel('./train.csv')
    features, target = feature_engineering(model.data_source, model.logfile)
    if features is None or target is None:
        print("特征工程失败，模型训练终止。")
    else:
        model_train(features, target)