# 绘图库
import plotly.express as px
import plotly.graph_objects as go
import matplotlib.pyplot as plt

import numpy as np
import pandas as pd

# Load the feature-engineered train/test sets produced by the earlier
# feature-engineering step ("特征工程" = feature engineering).
best_train = pd.read_csv(r'./data/特征工程/best_train.csv')
best_test = pd.read_csv(r'./data/特征工程/best_test.csv')

# 5.建模
# 现在开始通过对训练集进行训练模型，来预测客户是否会流失
# 5.1 lightGBM
# 导入相关库
import optuna
# 对数据进行训练之前检测出不太好的超参数集，从而显着减少搜索时间
from optuna.integration import LightGBMPruningCallback
# K折交叉验证
from sklearn.model_selection import StratifiedKFold,KFold
# 训练集和测试集分割
from sklearn.model_selection import train_test_split
import lightgbm as lgbm
# 用于保存和提取模型
import joblib

# Features: the engineered training set loaded above.
x = best_train
# Training labels; the "是否流失" column is the binary churn flag.
labeldf = pd.read_csv(r'./data/label.csv')
y = labeldf['是否流失']

def objective(trial, x, y, fold_time):
    """Optuna objective: tune LightGBM hyper-parameters with stratified K-fold CV.

    Args:
        trial: optuna.Trial used to sample hyper-parameters.
        x: feature DataFrame.
        y: binary target Series (churn flag).
        fold_time: number of cross-validation folds.

    Returns:
        Mean validation AUC across folds (the study maximizes this).
    """
    # Hyper-parameter search space (edit here as needed).
    params_grid = {'n_estimators': trial.suggest_int('n_estimators', 100, 1500),
               'learning_rate': trial.suggest_float('learning_rate', 0.01, 0.3),  # learning rate
               'num_leaves': trial.suggest_int('num_leaves', 10, 100),  # max leaves per tree
               'max_depth': trial.suggest_int('max_depth', 3, 100),  # max tree depth
               'min_data_in_leaf': trial.suggest_int('min_data_in_leaf', 10, 100),  # min samples per leaf
               'max_bin': trial.suggest_int('max_bin', 200, 300),  # max number of bins per feature
               "lambda_l1": trial.suggest_int("lambda_l1", 0, 100, step=5),  # L1 regularization
               "lambda_l2": trial.suggest_int("lambda_l2", 0, 100, step=5),  # L2 regularization
               "min_gain_to_split": trial.suggest_float("min_gain_to_split", 0, 15),  # minimum gain to perform a split
               "bagging_fraction": trial.suggest_float("bagging_fraction", 0.2, 1.0, step=0.1),  # row subsample ratio (without resampling)
               "bagging_freq": trial.suggest_int("bagging_freq", 1, 20),  # perform bagging every k iterations
               "feature_fraction": trial.suggest_float("feature_fraction", 0.2, 1.0, step=0.1)  # feature subsample ratio
               }
    # Stratified folds for the classification target (use KFold for regression).
    cv = StratifiedKFold(n_splits=fold_time, shuffle=True, random_state=2022)
    # Per-fold validation AUC scores.
    cv_scores = np.zeros(fold_time)
    for idx, (train_idx, test_idx) in enumerate(cv.split(x, y)):
        X_train, X_test = x.iloc[train_idx], x.iloc[test_idx]
        y_train, y_test = y.iloc[train_idx], y.iloc[test_idx]
        # 'boosting_type' is the canonical keyword (the final model at the
        # bottom of this script uses it too; 'boosting' is only an alias).
        model = lgbm.LGBMClassifier(boosting_type='gbdt',
                                    objective='binary',
                                    n_jobs=-1,
                                    force_row_wise=True,
                                    **params_grid)
        model.fit(
            X_train,
            y_train,
            eval_set=[(X_test, y_test)],
            eval_metric='auc',
            early_stopping_rounds=50,  # NOTE(review): removed in LightGBM >= 4.0 — use lgbm.early_stopping() callback there
            callbacks=[LightGBMPruningCallback(trial, 'auc')],  # prune unpromising trials early to cut search time
            verbose=False  # suppress per-iteration training output
        )
        # BUG FIX: the study maximizes this return value and the pruning
        # callback monitors validation AUC, but model.score() returns
        # *accuracy*. Read the validation AUC recorded during fit() instead,
        # so the optimized objective matches the pruning metric.
        cv_scores[idx] = model.best_score_['valid_0']['auc']
    # Mean validation AUC over all folds.
    return np.mean(cv_scores)

print('正在运行中--------->')
# Build the Optuna study; direction='maximize' because the objective
# returns a score to be maximized (minimize would be used for a loss).
study = optuna.create_study(study_name = 'LGBMClassifier',direction = 'maximize')

# Named wrapper so optuna only has to supply the trial object.
def run_trial(trial):
    return objective(trial, x, y, fold_time=5)

# Total number of trials to run (adjust n_trials as needed).
study.optimize(run_trial, n_trials=500)
print('运行成功~')

# CPU times: user 6h 56min 8s, sys: 1min 37s, total: 6h 57min 45s
# Wall time: 1h 10s

# 5.1.1 Training results: best score and the parameters that produced it.
print(f'最佳auc:{study.best_value}')
print('模型最佳参数:')
# One "name = value" line per tuned hyper-parameter.
print('\n'.join(f'{name} = {val}' for name, val in study.best_params.items()))

print('正在运行中~')
# Hold out 25% of the data to evaluate the final model.
# random_state added so the split — and therefore the saved model — is reproducible.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=2022)
# Final classifier configured with the best hyper-parameters found by the study above.
LGBMC = lgbm.LGBMClassifier(boosting_type = 'gbdt',
                        objective='binary',
                        n_jobs = -1,
                        force_row_wise = True,
                        n_estimators = 868,
                        learning_rate = 0.2736802462730599,
                        num_leaves = 100,
                        max_depth = 69,
                        min_data_in_leaf = 51,
                        max_bin = 242,
                        lambda_l1 = 0,
                        lambda_l2 = 0,
                        min_gain_to_split = 0.02168770715660931,
                        bagging_fraction = 1.0,
                        bagging_freq = 7,
                        feature_fraction = 0.9000000000000001)
LGBMC.fit(x_train, y_train)
# BUG FIX: the hold-out split was created but never used — evaluate the
# fitted model on it so we see its generalization score before saving.
print(f'留出集准确率:{LGBMC.score(x_test, y_test)}')
print('运行成功~')

# Persist the trained model for later reuse.
joblib.dump(LGBMC, r'./data/建模/lgbmc6-30.pkl')

# 5.1.2 Model application
# Reload the saved model if running this section standalone:
# LGBMC = joblib.load(r'./data/建模/lgbmc6-30.pkl')
# Predict churn for the test feature set.
y_pred = LGBMC.predict(best_test)
# The ID file holds train IDs followed by test IDs. Take the trailing rows
# matching the test-set length instead of the previous hard-coded offset
# (iloc[150000:]), so this keeps working if the train/test sizes change.
IDdf = pd.read_csv(r'./data/ID.csv')
test_ID = IDdf.iloc[-len(best_test):, :]
# Build the submission frame ("客户ID" = customer ID, "是否流失" = churn flag).
# The prediction array is laid out positionally against the ID index.
lgb_submitdf = pd.DataFrame({'客户ID': test_ID['客户ID'], '是否流失': y_pred})
lgb_submitdf.to_csv(r'./data/预测结果/submit6-30-1.csv', index = False, encoding = 'utf-8')
print(lgb_submitdf.head())

