import os
import joblib
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LassoCV
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score
from sklearn.preprocessing import StandardScaler
from xgboost import XGBRegressor, XGBClassifier
from sklearn.model_selection import GridSearchCV

# Module-level load of the raw data (runs at import time).
# NOTE(review): feature_engineering() re-reads these same files itself, so
# these globals are only referenced by the commented-out ana_data(data) call
# in the __main__ block — consider removing them to avoid the double read.
data = pd.read_csv('../data/train.csv')
test = pd.read_csv('../data/test2.csv')

# data.info()
# data.isnull().sum()

def feature_engineering():
    """
    Feature engineering and model comparison for the employee-attrition data.

    Pipeline (all paths relative to ../data/):
      1. Load train.csv / test2.csv.
      2. Map the OverTime column from Yes/No to 1/0 in both frames.
      3. Standardize the numeric features (scaler fit on train, applied to test).
      4. One-hot encode the categorical features of the training frame.
      5. Rank features with a RandomForestClassifier and print the top 10.
      6. Load the pre-built down-sampled / over-sampled training sets, train an
         XGBClassifier on each, and print accuracy and AUC for both variants.

    Side effects: reads four CSV files and prints metrics.
    :return: None
    """
    # Load the raw data (the module-level frames are intentionally shadowed).
    data = pd.read_csv('../data/train.csv')
    test = pd.read_csv('../data/test2.csv')

    # Preprocessing: encode OverTime as a 0/1 flag in both frames.
    data['OverTime'] = data['OverTime'].map({'Yes': 1, 'No': 0})
    test['OverTime'] = test['OverTime'].map({'Yes': 1, 'No': 0})

    # Standardize the numeric columns. The scaler is fitted on the training
    # data only, then applied to the test data, to avoid leakage.
    scaler = StandardScaler()
    numerical_features = ['Age', 'DistanceFromHome', 'MonthlyIncome', 'NumCompaniesWorked', 'TotalWorkingYears',
                          'YearsAtCompany', 'YearsInCurrentRole', 'YearsWithCurrManager']
    data[numerical_features] = scaler.fit_transform(data[numerical_features])
    # Bug fix: the original fitted the scaler but never transformed the test
    # frame, leaving it on a different scale than the training data.
    # (Assumes test2.csv shares the training schema — TODO confirm.)
    test[numerical_features] = scaler.transform(test[numerical_features])

    # One-hot encode the categorical columns of the training frame.
    categorical_features = ['BusinessTravel', 'Department', 'EducationField', 'Gender', 'JobRole', 'MaritalStatus',
                            'OverTime', 'Over18', 'StandardHours']
    data = pd.get_dummies(data, columns=categorical_features)

    # Split features/label: column 0 is the Attrition label, every column
    # after it is a feature. (The original comment wrongly said "except the
    # last column".)
    x = data.iloc[:, 1:]
    y = data.iloc[:, 0]
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=23)

    # Embedded feature selection: rank features by random-forest importance
    # and print the 10 most important ones.
    rf = RandomForestClassifier(n_estimators=100)
    rf.fit(x_train, y_train)
    importance = rf.feature_importances_
    top_indices = importance.argsort()[-10:][::-1]
    print("随机森林重要特征:", x.columns[top_indices])

    # The 10 features kept for modelling, chosen from the importance ranking
    # above (previously duplicated inline for both re-sampled data sets).
    selected_features = ['Age', 'OverTime_1', 'MonthlyIncome', 'TotalWorkingYears', 'StockOptionLevel',
                         'JobSatisfaction', 'EnvironmentSatisfaction', 'WorkLifeBalance',
                         'YearsAtCompany', 'TrainingTimesLastYear']

    # NOTE: train_downsampled.csv / train_oversampled.csv were produced by a
    # one-time re-sampling step (random under-sampling of the Attrition=0
    # majority / random over-sampling of the Attrition=1 minority,
    # random_state=23) and are read back here.

    # Down-sampled variant: split features / target, then train/test split.
    data_downsampled = pd.read_csv('../data/train_downsampled.csv')
    x = data_downsampled[selected_features]
    y = data_downsampled['Attrition']
    x_dn_train, x_dn_test, y_dn_train, y_dn_test = train_test_split(x, y, test_size=0.2, random_state=53)

    # Over-sampled variant.
    data_oversampled = pd.read_csv('../data/train_oversampled.csv')
    x = data_oversampled[selected_features]
    y = data_oversampled['Attrition']
    x_over_train, x_over_test, y_over_train, y_over_test = train_test_split(x, y, test_size=0.2, random_state=53)

    # XGBoost classifier. Hyperparameters came from an earlier GridSearchCV
    # sweep over n_estimators / learning_rate / max_depth / min_child_weight.
    # NOTE(review): learning_rate=0.6 is unusually high for 400 trees —
    # confirm it was intentional and not a leftover from the sweep.
    estimator = XGBClassifier(
        n_estimators=400,           # number of trees
        learning_rate=0.6,          # shrinkage per boosting round
        max_depth=4,                # maximum tree depth
        min_child_weight=2,         # minimum child-node weight
        reg_alpha=0.5,              # L1 regularization
        reg_lambda=0.5              # L2 regularization
    )

    # Train and evaluate on the down-sampled data.
    estimator.fit(x_dn_train, y_dn_train)
    y_dn_pred = estimator.predict(x_dn_test)
    acc = accuracy_score(y_dn_test, y_dn_pred)
    print(f'降采样score准确率 = {acc}')
    # Bug fix: ROC-AUC must be computed from the positive-class
    # probabilities, not from the hard 0/1 predictions.
    auc_value = roc_auc_score(y_dn_test, estimator.predict_proba(x_dn_test)[:, 1])
    print(f"降采样AUC: {auc_value:.4f}")

    # Train and evaluate on the over-sampled data.
    estimator.fit(x_over_train, y_over_train)
    y_up_pred = estimator.predict(x_over_test)
    acc = accuracy_score(y_over_test, y_up_pred)
    print(f'过采样score准确率 = {acc}')
    auc_value = roc_auc_score(y_over_test, estimator.predict_proba(x_over_test)[:, 1])
    print(f"过采样AUC: {auc_value:.4f}")

# Script entry point.
if __name__ == '__main__':
    # Run the full feature-engineering and model-comparison pipeline.
    feature_engineering()