# 0.导包
import json
import time
import lightgbm as lgb
import pandas as pd
import numpy as np
import re
import matplotlib.pylab as plt
from sklearn import metrics
from sklearn.metrics import mean_squared_error
import scorecardpy as sc
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
from collections import Counter
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
try:
    import cPickle as pickle
except BaseException:
    import pickle
import os
import warnings
warnings.filterwarnings("ignore")

# 0.1函数
def psi_stats_score(data_left, data_right, non_computed=None, plot_image=True):
    """Calculate the average PSI between two scored samples (0-100 scale).

    Parameters
    ----------
    data_left : pandas.DataFrame
        Must contain the columns 'score', 'y' and 'id'.

    data_right : pandas.DataFrame
        Must contain the columns 'score', 'y' and 'id'.

    non_computed : str or None
        Column name of a flag marking rows whose score was NOT produced by
        the model; rows where the flag equals True are excluded.

    plot_image : bool (default True)
        Whether to plot the good/bad ratio curves per score interval.

    Returns
    -------
    psi_table : pandas.DataFrame
        Per-interval counts, ratios and the psi_bad / psi_good components.
        The average PSI itself is printed, not returned.
    """
    # Validate columns and optionally drop non-computed rows.
    check_cols = ['score', 'y', 'id']
    if non_computed is not None:
        if not isinstance(non_computed, str):
            raise ValueError('non_computed must be a str.')
        check_cols += [non_computed]
        # BUG FIX: the original filter was `~data[col] == True`, which applies
        # bitwise NOT *before* the comparison (for 0/1 integer flags it matches
        # nothing). The intent is to keep rows whose flag is not True.
        data_left = data_left[data_left[non_computed] != True].copy()
        data_right = data_right[data_right[non_computed] != True].copy()
    for col in check_cols:
        if col not in data_left.columns or col not in data_right.columns:
            raise ValueError('Please check the columns %s of data' % col)

    # Drop rows with a missing score.
    data_left = data_left.loc[data_left['score'].notnull(), check_cols]
    data_right = data_right.loc[data_right['score'].notnull(), check_cols]

    # Clip scores into [0, 100) and bucket into ten 10-point intervals.
    break_points = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
    data_left.loc[data_left.score < 0, 'score'] = 0
    data_left.loc[data_left.score >= 100, 'score'] = 99
    data_right.loc[data_right.score < 0, 'score'] = 0
    data_right.loc[data_right.score >= 100, 'score'] = 99
    data_left['score'] = pd.cut(data_left['score'], break_points, right=False).values
    data_right['score'] = pd.cut(data_right['score'], break_points, right=False).values

    # Per-interval counts of good (y=0) / bad (y=1) and their distributions.
    count_left = data_left.groupby(['score', 'y']).count()['id'].unstack().fillna(value=0.0)
    count_right = data_right.groupby(['score', 'y']).count()['id'].unstack().fillna(value=0.0)
    count_left['bad_ratio'] = count_left[1] / count_left[1].sum()
    count_right['bad_ratio'] = count_right[1] / count_right[1].sum()
    count_left['good_ratio'] = count_left[0] / count_left[0].sum()
    count_right['good_ratio'] = count_right[0] / count_right[0].sum()
    count_final = pd.merge(count_left, count_right, left_index=True,
                           right_index=True, suffixes=['_left', '_right'])
    count_final['psi_bad'] = (count_left['bad_ratio'] - count_right['bad_ratio']) * \
                             np.log(count_left['bad_ratio'] / count_right['bad_ratio'])
    count_final['psi_good'] = (count_left['good_ratio'] - count_right['good_ratio']) * \
                              np.log(count_left['good_ratio'] / count_right['good_ratio'])

    # Intervals empty on one side yield inf/NaN components; treat them as zero.
    average_psi = (count_final['psi_bad'].replace([np.inf, np.nan], 0.0).sum() +
                   count_final['psi_good'].replace([np.inf, np.nan], 0.0).sum()) / 2

    if plot_image:
        plot_range = ['bad_ratio_left', 'good_ratio_left',
                      'bad_ratio_right', 'good_ratio_right']
        plot_label = ['Bad Ratio of Test Sample', 'Good Ratio of Test Sample',
                      'Bad Ratio of Train Set', 'Good Ratio of Train Set']
        color = ['blue', 'red', 'green', 'cyan']
        marker = ['s', 'x', 'o', 'v']
        # Tick positions/labels are loop-invariant; build them once.
        score_range = range(len(count_final.index))
        # BUG FIX: several labels had mismatched brackets (e.g. '[40,50]');
        # every interval is half-open because pd.cut uses right=False.
        score_label = ['[0,10)', '[10,20)', '[20,30)', '[30,40)', '[40,50)',
                       '[50,60)', '[60,70)', '[70,80)', '[80,90)', '[90,100)']
        plt.figure()
        for p, l, c, m in zip(plot_range, plot_label, color, marker):
            plt.plot(score_range, count_final[p].values, color=c, marker=m,
                     markersize=1, label=l)
        plt.grid()
        plt.legend(loc='upper left')
        plt.xticks(score_range, score_label, rotation=45)
        plt.title('PSI of Score Card')
        plt.ylabel('Ratio')
        plt.tight_layout()
        plt.show()
    print('Average PSI:%f' % average_psi)
    return count_final

#定义KS计算函数
from sklearn.metrics import roc_curve
def calKS(y, y_pred):
    """Return the KS statistic: the largest gap between TPR and FPR on the ROC curve."""
    false_pos, true_pos, _ = metrics.roc_curve(y, y_pred)
    return (true_pos - false_pos).max()

def psi_stats_score1(data_left, data_right, non_computed=None, plot_image=True):
    """Calculate the average PSI between two scored samples (300-1000 scale).

    Parameters
    ----------
    data_left : pandas.DataFrame
        Must contain the columns 'score', 'y' and 'id'.

    data_right : pandas.DataFrame
        Must contain the columns 'score', 'y' and 'id'.

    non_computed : str or None
        Column name of a flag marking rows whose score was NOT produced by
        the model; rows where the flag equals True are excluded.

    plot_image : bool (default True)
        Whether to plot the good/bad ratio curves per score interval.

    Returns
    -------
    psi_table : pandas.DataFrame
        Per-interval counts, ratios and the psi_bad / psi_good components.
        The average PSI itself is printed, not returned.
    """
    # Validate columns and optionally drop non-computed rows.
    check_cols = ['score', 'y', 'id']
    if non_computed is not None:
        if not isinstance(non_computed, str):
            raise ValueError('non_computed must be a str.')
        check_cols += [non_computed]
        # BUG FIX: the original filter was `~data[col] == True`, which applies
        # bitwise NOT *before* the comparison (for 0/1 integer flags it matches
        # nothing). The intent is to keep rows whose flag is not True.
        data_left = data_left[data_left[non_computed] != True].copy()
        data_right = data_right[data_right[non_computed] != True].copy()
    for col in check_cols:
        if col not in data_left.columns or col not in data_right.columns:
            raise ValueError('Please check the columns %s of data' % col)

    # Drop rows with a missing score.
    data_left = data_left.loc[data_left['score'].notnull(), check_cols]
    data_right = data_right.loc[data_right['score'].notnull(), check_cols]

    # Clip scores into [300, 1000) and bucket into fourteen 50-point intervals.
    break_points = list(range(300, 1001, 50))
    data_left.loc[data_left.score < 300, 'score'] = 300
    data_left.loc[data_left.score >= 1000, 'score'] = 999
    data_right.loc[data_right.score < 300, 'score'] = 300
    data_right.loc[data_right.score >= 1000, 'score'] = 999
    data_left['score'] = pd.cut(data_left['score'], break_points, right=False).values
    data_right['score'] = pd.cut(data_right['score'], break_points, right=False).values

    # Per-interval counts of good (y=0) / bad (y=1) and their distributions.
    count_left = data_left.groupby(['score', 'y']).count()['id'].unstack().fillna(value=0.0)
    count_right = data_right.groupby(['score', 'y']).count()['id'].unstack().fillna(value=0.0)
    count_left['bad_ratio'] = count_left[1] / count_left[1].sum()
    count_right['bad_ratio'] = count_right[1] / count_right[1].sum()
    count_left['good_ratio'] = count_left[0] / count_left[0].sum()
    count_right['good_ratio'] = count_right[0] / count_right[0].sum()
    count_final = pd.merge(count_left, count_right, left_index=True,
                           right_index=True, suffixes=['_left', '_right'])
    count_final['psi_bad'] = (count_left['bad_ratio'] - count_right['bad_ratio']) * \
                             np.log(count_left['bad_ratio'] / count_right['bad_ratio'])
    count_final['psi_good'] = (count_left['good_ratio'] - count_right['good_ratio']) * \
                              np.log(count_left['good_ratio'] / count_right['good_ratio'])

    # Intervals empty on one side yield inf/NaN components; treat them as zero.
    average_psi = (count_final['psi_bad'].replace([np.inf, np.nan], 0.0).sum() +
                   count_final['psi_good'].replace([np.inf, np.nan], 0.0).sum()) / 2

    if plot_image:
        plot_range = ['bad_ratio_left', 'good_ratio_left',
                      'bad_ratio_right', 'good_ratio_right']
        plot_label = ['Bad Ratio of Test Sample', 'Good Ratio of Test Sample',
                      'Bad Ratio of Train Set', 'Good Ratio of Train Set']
        color = ['blue', 'red', 'green', 'cyan']
        marker = ['s', 'x', 'o', 'v']
        # Tick positions/labels are loop-invariant; build them once.
        score_range = range(len(count_final.index))
        # BUG FIX: several labels had mismatched brackets (e.g. '[500,550]');
        # every interval is half-open because pd.cut uses right=False.
        score_label = ['[300,350)', '[350,400)',
                       '[400,450)', '[450,500)',
                       '[500,550)', '[550,600)',
                       '[600,650)', '[650,700)',
                       '[700,750)', '[750,800)',
                       '[800,850)', '[850,900)',
                       '[900,950)', '[950,1000)']
        plt.figure(figsize=(9, 6))
        for p, l, c, m in zip(plot_range, plot_label, color, marker):
            plt.plot(score_range, count_final[p].values, color=c, marker=m,
                     markersize=1, label=l)
        plt.grid()
        plt.legend(loc='upper left')
        plt.xticks(score_range, score_label, rotation=45)
        plt.title('PSI of Score Card')
        plt.ylabel('Ratio')
        plt.tight_layout()
        plt.show()
    print('Average PSI:%f' % average_psi)
    return count_final

#类别变量转数值变量
def cate_var_transform(X, Y):
    """Encode categorical (object-dtype) columns of X as bad-rate ranks.

    For each object column, every distinct non-null value is assigned an
    integer rank (len(values) .. 1) after sorting the values by their bad
    rate under the binary target Y, so a higher bad rate maps to a smaller
    rank. Cells that were NaN originally stay NaN in the output.

    Parameters
    ----------
    X : pandas.DataFrame
        Feature matrix; object columns are encoded, numeric ones pass through.
    Y : pandas.Series or array-like
        Binary target aligned with X's rows (1 = bad).

    Returns
    -------
    tuple (X_transformed, transform_rule)
        X_transformed: numeric columns concatenated with the encoded columns.
        transform_rule: the mapping table of the LAST object column only.
        NOTE(review): `object_transfer_rule` accumulates one rule per column
        but is never returned; returning only `transform_rule` looks like a
        bug — confirm intent with callers before changing.
    """
    # Split columns by dtype.
    d_type = X.dtypes
    object_var = X.iloc[:, np.where(d_type == "object")[0]]
    num_var = X.iloc[:, np.where(d_type != "object")[0]]

    # object_transfer_rule records one {column_name: rule_table} per column.
    object_transfer_rule = list(np.zeros([len(object_var.columns)]))

    # object_transform holds the numeric values after encoding.
    object_transform = pd.DataFrame(np.zeros(object_var.shape),
                                    columns=object_var.columns)

    for i in range(0, len(object_var.columns)):

        temp_var = object_var.iloc[:, i]

        # Distinct values excluding NaN.
        unique_value = np.unique(temp_var.iloc[np.where(~temp_var.isna())[0]])

        transform_rule = pd.concat([pd.DataFrame(unique_value, columns=['raw data']),
                                    pd.DataFrame(np.zeros([len(unique_value), 2]),
                                                 columns=['transform data', 'bad rate'])], axis=1)
        for j in range(0, len(unique_value)):
            bad_num = len(np.where((Y == 1) & (temp_var == unique_value[j]))[0])
            all_num = len(np.where(temp_var == unique_value[j])[0])

            # Compute the bad rate for this value.
            if all_num == 0:  # guard against division by zero
                all_num = 0.5
            transform_rule.iloc[j, 2] = 1.0000000 * bad_num / all_num

        # Sort by bad rate, then assign descending integer ranks.
        transform_rule = transform_rule.sort_values(by='bad rate')
        transform_rule.iloc[:, 1] = list(range(len(unique_value), 0, -1))

        # Save the mapping rule for this column.
        object_transfer_rule[i] = {object_var.columns[i]: transform_rule}
        # Apply the mapping to the column.
        for k in range(0, len(unique_value)):
            transfer_value = transform_rule.iloc[np.where(transform_rule.iloc[:, 0] == unique_value[k])[0], 1]
            object_transform.iloc[np.where(temp_var == unique_value[k])[0], i] = float(transfer_value)
        # Cells still 0 were never assigned (original value was NaN).
        object_transform.iloc[np.where(object_transform.iloc[:, i] == 0)[0], i] = np.nan

    X_transformed = pd.concat([num_var, object_transform], axis=1)
    return (X_transformed, transform_rule)

#去除缺失率、单一值率过高的变量
def missing_identity_select(data, y='flagy', missing_rate=0.9, identity_rate=0.9, kp_vars=None):
    """Drop columns whose missing rate or single-value (mode) rate is too high.

    Parameters
    ----------
    data : pandas.DataFrame
        Candidate features plus the target column `y`.
    y : str
        Target column name; always kept and excluded from the identity check.
    missing_rate : float
        Maximum allowed fraction of NaN per column.
    identity_rate : float
        Maximum allowed fraction taken by a column's most frequent value.
    kp_vars : list of str or None
        Columns exempt from screening but kept in the output.

    Returns
    -------
    pandas.DataFrame
        Surviving feature columns, followed by kp_vars and `y`.
    """
    # BUG FIX: the default kp_vars=None used to raise TypeError on the final
    # column selection, and the `y` argument was ignored ('flagy' hard-coded).
    kp_vars = list(kp_vars) if kp_vars else []
    data1 = data.drop(columns=kp_vars)
    # Screen by missing rate first.
    null_ratio = data1.isnull().sum() / data1.shape[0]
    data1 = data1.iloc[:, np.where(null_ratio <= missing_rate)[0]]
    # Then screen by the share of the most frequent value (mode rate).
    identity = (
        data1.drop(columns=y)
        .apply(lambda x: x.value_counts().max() / x.size)
        .reset_index(name='identity_rate')
        .rename(columns={'index': 'variable'})
    )
    identity_vars = identity[identity['identity_rate'] <= identity_rate]['variable'].to_list()
    return data[identity_vars + kp_vars + [y]]
#剔除自变量与因变量相关性过低、自变量与自变量相关性高的变量
def delete_corelation(data, y='flagy', y_cor=0.1, x_cor=0.7, kp_vars=None):
    """Filter variables by correlation.

    Keeps variables whose absolute correlation with the target `y` is at
    least `y_cor`; among those, drops any variable whose absolute correlation
    with a higher-ranked (more y-correlated) variable is at least `x_cor`.

    Parameters
    ----------
    data : pandas.DataFrame — features plus the target column `y`.
    y : str — target column name.
    y_cor : float — minimum |corr| with the target to keep a variable.
    x_cor : float — pairwise |corr| threshold above which the weaker variable is dropped.
    kp_vars : list of str or None — columns exempt from screening but kept.

    Returns
    -------
    pandas.DataFrame with the surviving variables, kp_vars and `y`.
    """
    # BUG FIX: the default kp_vars=None used to raise TypeError when building
    # the final column list.
    kp_vars = list(kp_vars) if kp_vars else []
    data1 = data.drop(columns=kp_vars)
    cor = data1.corr().abs()
    cor_y = cor[y]
    y_select = cor_y[cor_y >= y_cor]
    # BUG FIX: sort_values() is not in-place; keep the result so variables are
    # ranked by correlation with y and the stronger one of each pair survives.
    y_select = y_select.sort_values(ascending=False)
    cor_x = cor.loc[y_select.index.values, y_select.index.values]
    cor_x.drop(labels=y, inplace=True)
    cor_x.drop(columns=y, inplace=True)
    drop_index = []
    for i in range(len(cor_x) - 1):
        # BUG FIX: np.where() returns positions relative to the iloc[i+1:]
        # slice; offset by i + 1 so they address rows of cor_x correctly
        # (the original dropped the wrong variable names).
        drop_index.extend(np.where(cor_x.iloc[i + 1:, i] >= x_cor)[0] + i + 1)
    drop_name = cor_x.index[list(set(drop_index))].tolist()
    cor_x = cor_x.drop(index=drop_name)
    final_vars = cor_x.index.tolist() + kp_vars + [y]
    return data[final_vars]
#变量PSI计算
def psi_var(data_train, data_test):
    """Flag variables whose train/test PSI exceeds 0.1.

    Each numeric variable is bucketed into at most 6 equal-width bins built
    from the train sample, then PSI is computed between the train and test
    bin distributions.

    Parameters
    ----------
    data_train, data_test : pandas.DataFrame with the same columns.

    Returns
    -------
    list of str — names of variables with PSI > 0.1 (also printed).
    """
    data_train1 = data_train.copy()
    data_test1 = data_test.copy()
    drop = []
    for name in data_train.columns:
        num = data_train[name].unique().shape[0]
        # At most 6 bins; fewer when the variable has few distinct values.
        # The lower edge is min - 1 so the minimum falls inside the first bin.
        n_bins = num if num <= 6 else 6
        breaks = np.linspace(data_train[name].min() - 1, data_train[name].max(), n_bins + 1)
        data_train1[name] = pd.cut(data_train[name], bins=breaks, right=True).astype(str)
        data_test1[name] = pd.cut(data_test[name], bins=breaks, right=True).astype(str)
        df1 = data_train1[name].value_counts().rename('train')
        df2 = data_test1[name].value_counts().rename('test')
        df = pd.concat([df1, df2], axis=1)
        df['train_ratio'] = df.train / df.train.sum()
        df['test_ratio'] = df.test / df.test.sum()
        df['psi'] = (df.train_ratio - df.test_ratio) * np.log(df.train_ratio / df.test_ratio)
        # BUG FIX: replace() is not in-place; assign the result so infinite
        # contributions are zeroed before summing, as intended.
        df['psi'] = df['psi'].replace(np.inf, 0)
        if df.psi.sum() > 0.1:
            drop.append(name)
    print('PSI偏高的被剔除的变量：\n{}'.format(drop))
    return drop

def psi_var_table(data_train, data_test):
    """Build a per-variable, per-bin train/test PSI table.

    Variables with at most 6 distinct train values use those values as bin
    edges; others use 6 equal-width edges. Both sets of edges are extended
    with -inf / +inf so every observation falls in a bin.

    Parameters
    ----------
    data_train, data_test : pandas.DataFrame with the same columns.

    Returns
    -------
    pandas.DataFrame with columns:
        var, index (bin label), train, train_ratio, test, test_ratio,
        psi (per bin), psi_all (total PSI of the variable).
    """
    psi_table = pd.DataFrame()
    data_train1 = data_train.copy()
    data_test1 = data_test.copy()
    for name in data_train.columns:
        num = data_train[name].unique().shape[0]
        if num <= 6:
            # Few distinct values: use the sorted values themselves as edges.
            breaks = data_train[name].value_counts().sort_index().index.to_list()
        else:
            breaks = np.linspace(data_train[name].min(), data_train[name].max(), 6)
        breaks = [-np.inf] + list(breaks) + [np.inf]
        data_train1[name] = pd.cut(data_train[name], bins=breaks, right=True).astype(str)
        data_test1[name] = pd.cut(data_test[name], bins=breaks, right=True).astype(str)
        df1 = data_train1[name].value_counts().rename('train')
        df2 = data_test1[name].value_counts().rename('test')
        df = pd.concat([df1, df2], axis=1)
        df['train_ratio'] = df.train / df.train.sum()
        df['test_ratio'] = df.test / df.test.sum()
        df['psi'] = (df.train_ratio - df.test_ratio) * np.log(df.train_ratio / df.test_ratio)
        df['psi_all'] = df.psi.sum()
        df['var'] = name
        # Normalise the index name so reset_index always yields an 'index'
        # column (pandas >= 2.0 names the value_counts index after the
        # variable, which broke the final column selection below).
        df.index.name = None
        df.reset_index(inplace=True)
        psi_table = pd.concat([psi_table, df], axis=0, sort=False)
    if psi_table.empty:
        return psi_table
    return psi_table[['var', 'index', 'train', 'train_ratio', 'test', 'test_ratio', 'psi', 'psi_all']]

def badrate_month(df):
    """Summarise sample size, bad count and bad rate per calendar month.

    Expects columns 'user_date' (parseable dates; converted to datetime
    in place on the caller's frame) and 'flagy' (1 = bad). Returns a frame
    indexed by (year, month) with Chinese summary columns.
    """
    df['user_date'] = pd.to_datetime(df['user_date'])
    monthly = df[['user_date', 'flagy']].copy()
    monthly['year'] = monthly['user_date'].dt.year.astype(str)
    monthly['month'] = monthly['user_date'].dt.month.astype(str) + '月'
    stats = monthly.groupby(['year', 'month'])['flagy'].agg([len, sum])
    stats['坏客率%'] = stats['sum'] / stats['len'] * 100
    return stats.rename(columns={'len': '样本量', 'sum': '坏客数量'})

def score_psi_cal_high(df_score1, score_1='score', score_2='score_new',
                       score_range=(300, 1000), tick=50, if_flagy=True, flag='flagy'):
    """Compare two score distributions over fixed-width bins and compute PSI.

    Parameters
    ----------
    df_score1 : pandas.DataFrame — holds both score columns (and `flag` when if_flagy).
    score_1, score_2 : str — names of the two score columns to compare.
    score_range : sequence of two ints — inclusive lower / exclusive upper score bound.
    tick : int — bin width.
    if_flagy : bool — when True, also compute bad-sample stats and psi_bad.
    flag : str — binary target column (1 = bad).

    Returns
    -------
    pandas.DataFrame indexed by score bin with per-score sample counts and
    ratios (plus bad-customer columns when if_flagy) and psi / psi_bad.
    """
    # NOTE: default for score_range changed from a mutable list to a tuple
    # (same values) to avoid the shared-mutable-default pitfall.
    df_score = df_score1.copy()
    lo, hi = score_range[0], score_range[1]
    # Clip both scores into [lo, hi) so every row lands in a bin.
    for col in (score_1, score_2):
        df_score.loc[df_score[col] <= lo, col] = lo
        df_score.loc[df_score[col] >= hi, col] = hi - 1
    bins = range(lo, hi + 1, tick)
    df_score["score1_bin"] = pd.cut(df_score[score_1], bins=bins, right=False)
    df_score["score2_bin"] = pd.cut(df_score[score_2], bins=bins, right=False)

    def _flag_stats(bin_col, prefix):
        # Per-bin sample and bad-sample counts/ratios for one score column.
        part = df_score.groupby(by=[bin_col, flag]).size().unstack(level=[1], fill_value=0)
        part = part.fillna(0)
        part.sort_index(ascending=True, inplace=True)
        part[prefix + "样本量"] = part[1] + part[0]
        part[prefix + "占比"] = part[prefix + "样本量"] / part[prefix + "样本量"].sum()
        part[prefix + "坏客户数"] = part[1]
        part[prefix + "坏客户占比"] = part[prefix + "坏客户数"] / part[prefix + "坏客户数"].sum()
        part[prefix + "区间坏客率"] = part[prefix + "坏客户数"] / part[prefix + "样本量"]
        del part[0], part[1]
        return part

    def _count_stats(bin_col, prefix):
        # Per-bin sample counts/ratios only (no target available).
        part = df_score.groupby(by=[bin_col])[[bin_col]].count().rename(columns={bin_col: prefix + "样本量"})
        part[prefix + "占比"] = part[prefix + "样本量"] / part[prefix + "样本量"].sum()
        return part

    if if_flagy:
        df1 = _flag_stats("score1_bin", "评分1")
        df2 = _flag_stats("score2_bin", "评分2")
    else:
        df1 = _count_stats("score1_bin", "评分1")
        df2 = _count_stats("score2_bin", "评分2")
    # Join the two per-bin tables and compute the PSI components.
    df_output = pd.merge(df1, df2, how="outer", left_index=True, right_index=True)
    df_output = df_output.fillna(0)
    df_output["psi"] = (df_output["评分1占比"] - df_output["评分2占比"]) * np.log(df_output["评分1占比"] / df_output["评分2占比"])
    if if_flagy:
        df_output["psi_bad"] = (df_output["评分1坏客户占比"] - df_output["评分2坏客户占比"]) * np.log(df_output["评分1坏客户占比"] / df_output["评分2坏客户占比"])
    return df_output

def score_psi_cal_freq(df_score1, score_1='score', score_2='score_new',
                       score_range=(300, 1000), percent=5, if_flagy=True, flag='flagy'):
    """Compare two score distributions over equal-frequency bins and compute PSI.

    Bin edges are quantiles of `score_1` (every `percent` percent), with the
    outer edges forced to `score_range` so both scores are fully covered.

    Parameters
    ----------
    df_score1 : pandas.DataFrame — holds both score columns (and `flag` when if_flagy).
    score_1, score_2 : str — names of the two score columns to compare.
    score_range : sequence of two ints — inclusive lower / exclusive upper score bound.
    percent : int or float — quantile step in percent (5 -> 5% steps).
    if_flagy : bool — when True, also compute bad-sample stats and psi_bad.
    flag : str — binary target column (1 = bad).

    Returns
    -------
    pandas.DataFrame indexed by score bin with per-score sample counts and
    ratios (plus bad-customer columns when if_flagy) and psi / psi_bad.
    """
    # NOTE: default for score_range changed from a mutable list to a tuple
    # (same values) to avoid the shared-mutable-default pitfall.
    df_score = df_score1.copy()
    lo, hi = score_range[0], score_range[1]
    # Clip both scores into [lo, hi) so every row lands in a bin.
    for col in (score_1, score_2):
        df_score.loc[df_score[col] <= lo, col] = lo
        df_score.loc[df_score[col] >= hi, col] = hi - 1

    # Equal-frequency edges from score_1 quantiles; dedupe, then force the
    # outer edges to the full score range.
    df_score.sort_values(by=score_1, ascending=True, inplace=True)
    percent_list = list(range(0, 100 + percent, percent))
    breaks = sorted(set(np.percentile(df_score[score_1].values, percent_list)))
    breaks[-1] = hi
    breaks[0] = lo

    df_score["score1_bin"] = pd.cut(df_score[score_1], bins=breaks, right=False)
    df_score["score2_bin"] = pd.cut(df_score[score_2], bins=breaks, right=False)

    def _flag_stats(bin_col, prefix):
        # Per-bin sample and bad-sample counts/ratios for one score column.
        part = df_score.groupby(by=[bin_col, flag]).size().unstack(level=[1], fill_value=0)
        part = part.fillna(0)
        part.sort_index(ascending=True, inplace=True)
        part[prefix + "样本量"] = part[1] + part[0]
        part[prefix + "占比"] = part[prefix + "样本量"] / part[prefix + "样本量"].sum()
        part[prefix + "坏客户数"] = part[1]
        part[prefix + "坏客户占比"] = part[prefix + "坏客户数"] / part[prefix + "坏客户数"].sum()
        part[prefix + "区间坏客率"] = part[prefix + "坏客户数"] / part[prefix + "样本量"]
        del part[0], part[1]
        return part

    def _count_stats(bin_col, prefix):
        # Per-bin sample counts/ratios only (no target available).
        part = df_score.groupby(by=[bin_col])[[bin_col]].count().rename(columns={bin_col: prefix + "样本量"})
        part[prefix + "占比"] = part[prefix + "样本量"] / part[prefix + "样本量"].sum()
        return part

    if if_flagy:
        df1 = _flag_stats("score1_bin", "评分1")
        df2 = _flag_stats("score2_bin", "评分2")
    else:
        df1 = _count_stats("score1_bin", "评分1")
        df2 = _count_stats("score2_bin", "评分2")
    # Join the two per-bin tables and compute the PSI components.
    df_output = pd.merge(df1, df2, how="outer", left_index=True, right_index=True)
    df_output = df_output.fillna(0)
    df_output["psi"] = (df_output["评分1占比"] - df_output["评分2占比"]) * np.log(df_output["评分1占比"] / df_output["评分2占比"])
    if if_flagy:
        df_output["psi_bad"] = (df_output["评分1坏客户占比"] - df_output["评分2坏客户占比"]) * np.log(df_output["评分1坏客户占比"] / df_output["评分2坏客户占比"])
    return df_output

def report(
    data_total, 
    data_train, 
    data_test,
    data_oot = None,
    model = None,
    y = 'flagy',
    filename='',
    points0 = 55,
    pdo = -10,
    odds0 = 0.1,
    grey = 2,
    score_range = (0,100),
    tick = 10,
    percent = 5,
    top_n = 10,
    **kwargs):
    """
    :param data_total: dataframe 所有训练样本，不含验证集，包含入模变量、标签及申请日期，申请日期最好为日期格式，有灰客户样本
    :param data_train: dataframe 训练集，只包含入模变量及标签，不含灰客户
    :param data_test: dataframe 测试集，只包含入模变量及标签，不含灰客户
    :param data_oot: dataframe 验证集，默认为None，不含灰客户
    :param model: 最终模型,默认为bst
    :param y: str 标签名，默认为'flagy'
    :param filename: str 报告名，默认为'',输出名称自动加'report_生成日期'后缀，如'lgb模型report_2021_07_23'
    :param points0: int 基准分，默认55
    :param pdo: int pd0，默认-10
    :param odds0: float 坏账率比，默认0.1
    :param grey: int, float or str 灰客户的取值标识，默认取2
    :param score_range: tuple 评分的上下限，如 (0, 100)
    :param tick: int or float 评分分布的分数间隔，默认为10
    :param percent: int or float 评分等频分布的分位数间隔，默认为5，即5%分位数
    :param top_n: int 输出重要性为前n的变量分箱图，默认为10，即输出变量重要度为前10的变量分箱图
    :param kwargs: 其他变量,如user_date='user_date'：申请日期名称，出现在data_total中，主要用于报告第2部分样本分析
    """
    import datetime as dtt
    # 文件名
    filename = filename + 'report_' + dtt.datetime.now().strftime('%Y-%m-%d-%H-%M')
    # 取出模型中的变量,确认函数中传入的样本包含入模变量和标签
    var_final = model.booster_.feature_name()
    data_train = data_train[var_final + [y]].copy() # 训练集只取入模变量和y
    data_test = data_test[var_final + [y]].copy() # 测试集只取入模变量和y
    try:
        data_oot = data_oot[var_final + [y]].copy() # 验证集只取入模变量和y
    except:
        pass

    # 进行excel报告内容整理
    table = pd.ExcelWriter(filename + '.xlsx',engine = 'xlsxwriter')
    # ------------------------------------------------------------------------------------------------------------------
    # 目录页
    sheet = pd.DataFrame(
        columns=["编号", "中文简称", "英文简称", "内容"],
        data=[
            ["1", "模型使用说明", "Model_Explain", "模型使用说明"],
            ["2", "原始数据统计", "Original_Stat", "原始数据情况、建模数据选取、匹配百融数据说明"],
            ["3", "衍生特征构造", "Var_derivation", "衍生特征构造"],
            ["4", "数据预处理-格式转换", "Data_Pre_Format", "数据预处理-格式转换"],
            ["5", "模型参数", "Model_Params", "模型参数及评分参数设定"],
            ["6", "模型区分度评估", "Model_Disc", "模型区分度评估"],
            ["7", "模型稳定性评估", "Model_Stab", "模型稳定性评估"],
            ["8", "单变量稳定性", "Var_Stab", "单变量稳定性评估"],
            ["9", "变量重要性", "Var_Importance", "单变量重要性排序"],           
            ["10", "变量趋势", "Var_trend", "重要变量趋势"],
            ["11", "样本风险评分分布", "Model_Score", "模型评分及风险表现"],
            ["12", "评分决策表", "Decision_table", "不同评分分段的通过率、违约率提升"],
        ],
    )
    sheet.to_excel(table, sheet_name="目录", startrow=0, startcol=0, index=False)
    # -------------------------------------------------------------------------------------------------------------------
    # 1.模型使用说明页
    head = pd.DataFrame(columns=["返回目录"])
    sheet1 = pd.DataFrame(
        index=["版本名称", "模型类型", "客群种类", "该版本更新时间", "开发人员", "建模样本数据量", "模型变量数量", "核心算法"],
        columns=["内容"],
    )
    head.to_excel(table, sheet_name="1.模型使用说明", startrow=0, index=False)
    sheet1.to_excel(table, sheet_name="1.模型使用说明", startrow=1)    
    # -------------------------------------------------------------------------------------------------------------------
    # 2.原始数据统计页
    head2_1 = pd.DataFrame(columns=["一、数据来源"])
    sheet2_1 = pd.DataFrame(
        index=[
            "机构",
            "产品类型",
            "业务开展时间",
            "引流渠道",
            "额度区间",
            "期数范围",
            "存量客户数量",
            "日进件量",
            "平均通过率",
            "审批流程",
            "审批使用数据",
        ],
        columns=["内容"],
    )
    head2_2 = pd.DataFrame(columns=["二、数据概要"])
    sheet2_2 = pd.DataFrame(
        index=[
            "客群描述",
            "观察期",
            "表现期",
            "原始样本时间",
            "原始样本量",
            "建模样本时间",
            "建模样本量",
            "验证样本时间",
            "验证样本量",
        ],
        columns=["内容"],
    )

    head2_3 = pd.DataFrame(columns=["三、好坏客户定义"])
    sheet2_3 = pd.DataFrame(columns=["客户类型", "定义方式", "样本量", "好坏客户定义描述"])
    sheet2_3["客户类型"] = ["坏客户", "灰客户", "好客户"]

    head2_4 = pd.DataFrame(columns=["四、建模数据统计情况"])
    sheet2_4 = pd.DataFrame(columns=["年", "月", "数量", "比例", "坏数量", "坏账率", "平均坏账率"])
    if "user_date" in kwargs.values():
        data_total['user_date'] = pd.to_datetime(data_total['user_date'], errors="coerce")
        temp = data_total.copy()
        temp["年"] = temp['user_date'].apply(lambda x: str(x.year) + "年")
        temp["月"] = temp['user_date'].apply(lambda x: str(x.month) + "月")
        temp = (
            temp[temp[y] != grey]
            .groupby(["年", "月"])[y]
            .agg([len, sum])
            .rename(columns={"len": "数量", "sum": "坏数量"})
            .reset_index()
        )
        temp["比例"] = temp["数量"] / temp["数量"].sum()
        temp["坏账率"] = temp["坏数量"] / temp["数量"]
        temp["平均坏账率"] = temp["坏数量"].sum() / temp["数量"].sum()
        temp = temp[["年","月","数量", "比例", "坏数量", "坏账率", "平均坏账率"]]
        sheet2_4 = temp.copy()

    head2_5 = pd.DataFrame(columns=["五、建模数据选取"])
    sheet2_5 = pd.DataFrame(columns=["类型", "年", "月", "数量", "比例", "坏数量", "坏账率", "平均坏账率"])
    sheet2_5["类型"] = ["训练", "测试", "验证"]
    if "user_date" in kwargs.values():
        data_train["user_date"] = data_total['user_date']
        data_test["user_date"] = data_total['user_date']
        data_train["类型"] = "训练"
        data_test["类型"] = "测试"
        try:
            data_oot["类型"] = "验证"
            data_oot["user_date"] = data_oot['user_date']
        except:
            pass
        data_merge = pd.concat([data_train, data_test, data_oot], axis=0, sort=False)
        data_merge["年"] = data_merge['user_date'].apply(lambda x: str(x.year))
        data_merge["月"] = data_merge['user_date'].apply(lambda x: str(x.month) + "月")
        data_merge = (
            data_merge.groupby(["类型", "年", "月"])[y]
            .agg([len, sum])
            .rename(columns={"len": "数量", "sum": "坏数量"})
            .reset_index()
        )
        data_merge["比例"] = data_merge["数量"] / data_merge["数量"].sum()
        data_merge["坏账率"] = data_merge["坏数量"] / data_merge["数量"]
        data_merge["平均坏账率"] = data_merge["坏数量"].sum() / data_merge["数量"].sum()
        data_merge = data_merge[["类型","年","月","数量", "比例", "坏数量", "坏账率", "平均坏账率"]]
        sheet2_5 = data_merge.copy()
        del data_train['user_date'],data_test['user_date'],data_train['类型'],data_test['类型']
        try:
            del data_oot['user_date'],data_oot['类型']
        except:
            pass

    head2_6 = pd.DataFrame(columns=["六、数据集划分"])
    sheet2_6 = pd.DataFrame(columns=["数据量", "坏样本", "坏账率"], index=["训练集", "测试集", "验证集"])
    try:
        sheet2_6["数据量"] = [data_train.shape[0], data_test.shape[0], data_oot.shape[0]]
        sheet2_6["坏样本"] = [data_train[y].sum(), data_test[y].sum(), data_oot[y].sum()]
        sheet2_6["坏账率"] = sheet2_6["坏样本"] / sheet2_6["数据量"]
    except:
        sheet2_6["数据量"] = [data_train.shape[0], data_test.shape[0], 0]
        sheet2_6["坏样本"] = [data_train[y].sum(), data_test[y].sum(), 0]
        sheet2_6["坏账率"] = sheet2_6["坏样本"] / sheet2_6["数据量"]

    head.to_excel(table, sheet_name="2.原始数据统计", startrow=0, index=False)
    head2_1.to_excel(table, sheet_name="2.原始数据统计", startrow=1, index=False)
    sheet2_1.to_excel(table, sheet_name="2.原始数据统计", startrow=3, startcol=1)
    head2_2.to_excel(table, sheet_name="2.原始数据统计", startrow=16, index=False)
    sheet2_2.to_excel(table, sheet_name="2.原始数据统计", startrow=18, startcol=1)
    head2_3.to_excel(table, sheet_name="2.原始数据统计", startrow=29, index=False)
    sheet2_3.to_excel(
        table, sheet_name="2.原始数据统计", startrow=31, startcol=1, index= False
    )
    # --- Sheet 2 (raw-data statistics), remaining tables --------------------
    # head2_4/sheet2_4/head2_5/... are built earlier in this function; here
    # they are laid out onto the "2.原始数据统计" worksheet at fixed offsets.
    head2_4.to_excel(table, sheet_name="2.原始数据统计", startrow=36, index=False)
    # NOTE(review): both branches below perform the same write — the empty
    # check appears redundant; confirm before simplifying.
    if sheet2_4.shape[0] == 0:
        sheet2_4.to_excel(
            table, sheet_name="2.原始数据统计", startrow=38, startcol=1, index=False
        )
    else:
        sheet2_4.to_excel(table, sheet_name="2.原始数据统计", startrow=38, startcol=1,index=False)
    # Next table starts below sheet2_4 plus a fixed 17-row gap.
    row_number = sheet2_4.shape[0] + 38 + 17
    head2_5.to_excel(table, sheet_name="2.原始数据统计", startrow=row_number, index=False)
    sheet2_5.to_excel(table, sheet_name="2.原始数据统计", startrow=row_number + 2, startcol=1,index = False)
    row_number1 = row_number + 2 + sheet2_5.shape[0] + 2
    head2_6.to_excel(table, sheet_name="2.原始数据统计", startrow=row_number1, index=False)
    # NOTE(review): index=False is omitted here (unlike every other write in
    # this section), so sheet2_6 keeps its index column — confirm intended.
    sheet2_6.to_excel(
        table, sheet_name="2.原始数据统计", startrow=row_number1 + 2, startcol=1
    )
    # ---------------------------------------------------------------------------------------------------
    # 3. Derived-variable construction: empty template sheet, filled manually.
    sheet3 = pd.DataFrame(columns=["序号", "模块", "变量", "中文名", "数据来源", "衍生逻辑"])
    head.to_excel(table, sheet_name="3.衍生变量构造", index=False)
    sheet3.to_excel(table, sheet_name="3.衍生变量构造", startrow=2, index=False)
    # ----------------------------------------------------------------------------------------------------
    # 4. Data preprocessing: empty template sheet, filled manually.
    sheet4 = pd.DataFrame(columns=["序号", "变量", "数据源", "变量类型", "编码(转换)格式", "举例"])
    head.to_excel(table, sheet_name="4.数据预处理", index=False)
    sheet4.to_excel(table, sheet_name="4.数据预处理", startrow=2, index=False)
    # -----------------------------------------------------------------------------------------------------
    # 5. Model parameters: parse the estimator's repr() ("Name(a=1,\n b=2,...)")
    # into a name/value table and join human-readable descriptions.
    col = list(str(bst).split(','))
    model_param = pd.DataFrame(col,columns = ['parms'])
    # Split each "name=value" piece.
    # NOTE(review): positional n for Series.str.split is deprecated in newer
    # pandas (keyword-only `n=`) — confirm the pinned pandas version.
    model_param[['parms','参数值']] = model_param.parms.str.split('=',2,expand=True)
    model_param['parms'] = model_param['parms'].apply(lambda x : x.replace('\n',''))
    model_param['parms'] = model_param['parms'].apply(lambda x :str(x).strip())
    # Strip the class-name prefix from row 0 and the trailing ")" from row 13.
    # NOTE(review): both slice positions assume a fixed repr layout of the
    # LightGBM estimator — brittle across lightgbm versions.
    model_param.iloc[0,0] = model_param.iloc[0,0][15:]
    model_param.iloc[13,1] = model_param.iloc[13,1][:-1]
    # Chinese descriptions for the parameters shown on the sheet.
    mapping = pd.DataFrame({'parms':['bagging_fraction','feature_fraction','bagging_freq','importance_type','learning_rate',
                                'max_bin','max_depth','min_data_in_leaf','n_estimators',
                                'num_leaves','objective','random_state','reg_alpha','reg_lambda'],
                            '模型参数':['每次抽取的样本比例','每次抽取的特征比例','装袋频率','重要性计算方式',
                               '学习率','最大分箱数量','最大树深','每个叶子节点的最少样本量',
                                '学习器数量','叶子结点数量','分类方式','随机参数','一阶惩罚项','二阶惩罚项']})
    model_param = pd.merge(model_param,mapping,how = 'left',on = 'parms')
    sheet5 = model_param[['parms','模型参数','参数值']].copy()

    head.to_excel(table, sheet_name="5.模型参数", index=False)
    sheet5.to_excel(table, sheet_name="5.模型参数", index=False, startrow=2)
    # -----------------------------------------------------------------------------------------------------
    # 6. Model discrimination: KS / AUC on train, test and (optional) OOT set.
    title = pd.DataFrame(columns=["模型区分度评估"])
    sheet6 = pd.DataFrame(columns=["评估指标", "训练集", "测试集", "验证集"])
    sheet6["评估指标"] = ["KS", "AUC"]
    # Compute KS / AUC.
    # Predicted probability of the positive class (column 1 of predict_proba).
    train_class_pred = model.predict_proba(data_train.drop(columns = [y]))[:,1]
    test_class_pred = model.predict_proba(data_test.drop(columns = [y]))[:,1]
    # data_oot may not exist; the bare except keeps the OOT path optional.
    try:
        oot_class_pred = model.predict_proba(data_oot.drop(columns = [y]))[:,1]
    except:
        pass
    # Evaluate with scorecardpy; perf_eva returns KS, AUC and a plot figure.
    train_perf = sc.perf_eva(data_train[y], train_class_pred, title="train")
    test_perf = sc.perf_eva(data_test[y], test_class_pred, title="test")
    # Persist the plots so they can be embedded into the workbook below.
    train_perf["pic"].savefig("train_KS_AUC.png", bbox_inches="tight")
    test_perf["pic"].savefig("test_KS_AUC.png", bbox_inches="tight")
    sheet6["训练集"] = [train_perf["KS"], train_perf["AUC"]]
    sheet6["测试集"] = [test_perf["KS"], test_perf["AUC"]]
    try:
        oot_perf = sc.perf_eva(data_oot[y], oot_class_pred, title="oot")
        oot_perf["pic"].savefig("oot_KS_AUC.png", bbox_inches="tight")
        sheet6["验证集"] = [oot_perf["KS"], oot_perf["AUC"]]
    except:
        pass
    # Narrative headline plus per-dataset KS captions.
    title1 = pd.DataFrame(
        columns=[
            "此次建模，训练样本KS={}，AUC={}，模型结果较理想，模型对好坏客户具有很好的区分度，且模型较稳定，达到建模预期目标".format(
                train_perf["KS"], train_perf["AUC"])])
    title2 = pd.DataFrame(columns=["训练集", "KS={}".format(train_perf["KS"])])
    title3 = pd.DataFrame(columns=["测试集", "KS={}".format(test_perf["KS"])])
    title.to_excel(table, sheet_name="6.模型区分度评估", index=False, startrow=1)
    title1.to_excel(table, sheet_name="6.模型区分度评估", index=False, startrow=8)
    title2.to_excel(table, sheet_name="6.模型区分度评估", index=False, startrow=10, startcol=3)
    title3.to_excel(table, sheet_name="6.模型区分度评估", index=False, startrow=10, startcol=11)
    try:
        title4 = pd.DataFrame(columns=["验证集", "KS={}".format(oot_perf["KS"])])
        title4.to_excel(
            table, sheet_name="6.模型区分度评估", index=False, startrow=10, startcol=19
        )
    except:
        pass

    head.to_excel(table, sheet_name="6.模型区分度评估", index=False)
    sheet6.to_excel(table, sheet_name="6.模型区分度评估", index=False, startrow=2)
    # Embed the KS/AUC curve images.
    # NOTE(review): assumes the xlsxwriter engine, where book.sheetnames maps
    # sheet name -> worksheet object; openpyxl exposes a plain list here —
    # confirm which engine created `table`.
    sheet = table.book.sheetnames["6.模型区分度评估"]
    sheet.insert_image("A12", "train_KS_AUC.png", {"x_scale": 0.9, "y_scale": 0.9})
    sheet.insert_image("I12", "test_KS_AUC.png", {"x_scale": 0.9, "y_scale": 0.9})
    try:
        sheet.insert_image("Q12", "oot_KS_AUC.png", {"x_scale": 0.9, "y_scale": 0.9})
    except:
        pass
    # -------------------------------------------------------------------------------------------------------------------------
    # 7. Model stability: PSI between train and test score distributions,
    # once with equal-width bins and once with equal-frequency bins.
    title1 = pd.DataFrame(columns=["1.训练&测试"])
    title2_1 = pd.DataFrame(columns=["等间距", "模型样本量分布评估"])
    title2_2 = pd.DataFrame(columns=["等频", "模型样本量分布评估"])
    head.to_excel(table, sheet_name="7.模型稳定性评估", index=False)
    title1.to_excel(table, sheet_name="7.模型稳定性评估", index=False, startrow=1)
    title2_1.to_excel(table, sheet_name="7.模型稳定性评估", index=False, startrow=2)
    title2_2.to_excel(
        table, sheet_name="7.模型稳定性评估", index=False, startrow=2, startcol=11
    )
    # Standard points/odds scaling: score = A - B * ln(odds).
    B = pdo / np.log(2)
    A = points0 + B * np.log(odds0)
    score_train = np.around(A - B * np.log(train_class_pred/(1 - train_class_pred)))
    score_train = pd.DataFrame(score_train,index = data_train.index)
    score_train = pd.concat([data_train['flagy'],score_train],axis = 1).rename(columns = {0:'score'})
    score_test = np.around(A - B * np.log(test_class_pred/(1 - test_class_pred)))
    score_test = pd.DataFrame(score_test,index = data_test.index)
    score_test = pd.concat([data_test['flagy'],score_test],axis = 1).rename(columns = {0:'score'})
    # OOT scores are optional — skipped silently when oot_class_pred is absent.
    try:
        score_oot = np.around(A - B * np.log(oot_class_pred/(1 - oot_class_pred)))
        score_oot = pd.DataFrame(score_oot,index = data_oot.index)
        score_oot = pd.concat([data_oot['flagy'],score_oot],axis = 1).rename(columns = {0:'score'})
    except:
        pass
    # Build tables --------------
    # Train & test -------
    # Equal-width bins -----
    # Training set (grey-labelled samples excluded).
    df = score_train[score_train[y] != grey]
    # Clamp scores into [score_range[0], score_range[1] - 1] so every row
    # lands inside a right-open bin.
    df.loc[df.score < score_range[0], "score"] = score_range[0]
    df.loc[df.score >= score_range[1], "score"] = score_range[1] - 1
    df["score"] = pd.cut(
        df.score, bins=range(score_range[0], score_range[1] + 1, tick), right=False
    )
    df = df.groupby(by=["score", y]).size().unstack(level=[1], fill_value=0)
    df = df.fillna(0)
    df.sort_index(ascending=True, inplace=True)
    df["训练样本量"] = df[1] + df[0]
    df["训练集占比"] = df["训练样本量"] / df["训练样本量"].sum()
    df["训练坏客户数"] = df[1]
    df["训练坏客户占比"] = df["训练坏客户数"] / df["训练坏客户数"].sum()
    del df[0], df[1]
    # Test set.
    df1 = score_test[score_test[y] != grey]
    df1.loc[df1.score < score_range[0], "score"] = score_range[0]
    df1.loc[df1.score >= score_range[1], "score"] = score_range[1] - 1
    df1["score"] = pd.cut(
        df1.score, bins=range(score_range[0], score_range[1] + 1, tick), right=False
    )
    # NOTE(review): groups on the literal "flagy" here but on `y` just above —
    # this only works if y == 'flagy'; confirm.
    df1 = df1.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    df1 = df1.fillna(0)
    df1.sort_index(ascending=True, inplace=True)
    df1["测试样本量"] = df1[1] + df1[0]
    df1["测试集占比"] = df1["测试样本量"] / df1["测试样本量"].sum()
    df1["测试坏客户数"] = df1[1]
    df1["测试坏客户占比"] = df1["测试坏客户数"] / df1["测试坏客户数"].sum()
    del df1[0], df1[1]
    # Merge and compute per-bin PSI: (p_train - p_test) * ln(p_train / p_test).
    sheet7_1 = df.merge(df1, how="outer", on="score")
    sheet7_1 = sheet7_1.fillna(0)
    sheet7_1["psi"] = (sheet7_1["训练集占比"] - sheet7_1["测试集占比"]) * np.log(
        sheet7_1["训练集占比"] / sheet7_1["测试集占比"]
    )
    sheet7_1["psi_bad"] = (sheet7_1["训练坏客户占比"] - sheet7_1["测试坏客户占比"]) * np.log(
        sheet7_1["训练坏客户占比"] / sheet7_1["测试坏客户占比"]
    )

    # Equal-frequency bins ------
    # Training set: bin edges come from training-score percentiles.
    dt = score_train[score_train[y] != grey]
    dt.loc[dt.score < score_range[0], "score"] = score_range[0]
    dt.loc[dt.score >= score_range[1], "score"] = score_range[1] - 1
    dt.sort_values(by="score", ascending=True, inplace=True)
    percent_list = list(range(0, 100 + percent, percent))
    breaks = np.percentile(dt.score.values, percent_list)
    # De-duplicate edges (score ties can collapse percentiles), then pin the
    # two outer edges to the configured score range.
    breaks = list(set(breaks))
    breaks = sorted(breaks)
    breaks[-1] = score_range[1]
    breaks[0] = score_range[0]
    dt["score"] = pd.cut(dt.score, bins=breaks, right=False)
    dt = dt.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    dt = dt.fillna(0)
    dt.sort_index(ascending=True, inplace=True)
    dt["训练样本量"] = dt[1] + dt[0]
    dt["训练集占比"] = dt["训练样本量"] / dt["训练样本量"].sum()
    dt["训练坏客户数"] = dt[1]
    dt["训练坏客户占比"] = dt["训练坏客户数"] / dt["训练坏客户数"].sum()
    del dt[0], dt[1]
    # Test set, cut with the SAME training-derived breaks.
    df1 = score_test[score_test[y] != grey]
    df1.loc[df1.score < score_range[0], "score"] = score_range[0]
    df1.loc[df1.score >= score_range[1], "score"] = score_range[1] - 1
    df1.sort_values(by="score", ascending=True, inplace=True)
    df1["score"] = pd.cut(df1.score, bins=breaks, right=False)
    df1 = df1.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    df1 = df1.fillna(0)
    df1.sort_index(ascending=True, inplace=True)
    df1["测试样本量"] = df1[1] + df1[0]
    df1["测试集占比"] = df1["测试样本量"] / df1["测试样本量"].sum()
    df1["测试坏客户数"] = df1[1]
    df1["测试坏客户占比"] = df1["测试坏客户数"] / df1["测试坏客户数"].sum()
    del df1[0], df1[1]
    # Merge and compute per-bin PSI.
    sheet7_2 = dt.merge(df1, how="outer", on="score")
    sheet7_2 = sheet7_2.fillna(0)
    sheet7_2["psi"] = (sheet7_2["训练集占比"] - sheet7_2["测试集占比"]) * np.log(
        sheet7_2["训练集占比"] / sheet7_2["测试集占比"]
    )
    sheet7_2["psi_bad"] = (sheet7_2["训练坏客户占比"] - sheet7_2["测试坏客户占比"]) * np.log(
        sheet7_2["训练坏客户占比"] / sheet7_2["测试坏客户占比"]
    )

    sheet7_1.to_excel(table, sheet_name="7.模型稳定性评估", startrow=4)
    sheet7_2.to_excel(table, sheet_name="7.模型稳定性评估", startrow=4, startcol=12)
    # Row offset for the optional train-vs-OOT section written below.
    row_number = max(sheet7_2.shape[0], sheet7_1.shape[0]) + 4 + 20 + 2
    # 7 (cont.) Stability vs. the OOT/validation set, when one exists.
    # The bare except keeps this whole section optional: if score_oot was
    # never built (no OOT sample), everything here is skipped silently.
    try:
        # Equal-width bins: reuse the train-side table `df` built above.
        df2 = score_oot[score_oot[y] != grey]
        # Clamp into [score_range[0], score_range[1] - 1] so every row lands
        # inside a right-open bin.
        df2.loc[df2.score < score_range[0], "score"] = score_range[0]
        df2.loc[df2.score >= score_range[1], "score"] = score_range[1] - 1
        df2["score"] = pd.cut(
            df2.score, bins=range(score_range[0], score_range[1] + 1, tick), right=False
        )
        df2 = df2.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
        df2 = df2.fillna(0)
        df2.sort_index(ascending=True, inplace=True)
        df2["验证样本量"] = df2[1] + df2[0]
        df2["验证集占比"] = df2["验证样本量"] / df2["验证样本量"].sum()
        df2["验证坏客户数"] = df2[1]
        df2["验证坏客户占比"] = df2["验证坏客户数"] / df2["验证坏客户数"].sum()
        del df2[0], df2[1]
        # Merge with the train table and compute per-bin PSI.
        sheet7_1 = df.merge(df2, how="outer", on="score")
        sheet7_1 = sheet7_1.fillna(0)
        sheet7_1["psi"] = (sheet7_1["训练集占比"] - sheet7_1["验证集占比"]) * np.log(
            sheet7_1["训练集占比"] / sheet7_1["验证集占比"]
        )
        # BUGFIX: this previously subtracted/divided 验证坏客户占比 by itself,
        # which made psi_bad identically zero. Compare the TRAIN bad-rate
        # distribution against the VALIDATION one, mirroring the train/test
        # section above and the equal-frequency branch below.
        sheet7_1["psi_bad"] = (sheet7_1["训练坏客户占比"] - sheet7_1["验证坏客户占比"]) * np.log(
            sheet7_1["训练坏客户占比"] / sheet7_1["验证坏客户占比"]
        )
        # Equal-frequency bins: reuse `breaks` fitted on the training scores.
        df2 = score_oot[score_oot[y] != grey]
        df2.loc[df2.score < score_range[0], "score"] = score_range[0]
        df2.loc[df2.score >= score_range[1], "score"] = score_range[1] - 1
        df2["score"] = pd.cut(df2.score, bins=breaks, right=False)
        df2 = df2.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
        df2 = df2.fillna(0)
        df2.sort_index(ascending=True, inplace=True)
        df2["验证样本量"] = df2[1] + df2[0]
        df2["验证集占比"] = df2["验证样本量"] / df2["验证样本量"].sum()
        df2["验证坏客户数"] = df2[1]
        df2["验证坏客户占比"] = df2["验证坏客户数"] / df2["验证坏客户数"].sum()
        del df2[0], df2[1]
        # Merge with the equal-frequency train table and compute PSI.
        sheet7_2 = dt.merge(df2, how="outer", on="score")
        sheet7_2 = sheet7_2.fillna(0)
        sheet7_2["psi"] = (sheet7_2["训练集占比"] - sheet7_2["验证集占比"]) * np.log(
            sheet7_2["训练集占比"] / sheet7_2["验证集占比"]
        )
        sheet7_2["psi_bad"] = (sheet7_2["训练坏客户占比"] - sheet7_2["验证坏客户占比"]) * np.log(
            sheet7_2["训练坏客户占比"] / sheet7_2["验证坏客户占比"]
        )
        # Lay the train-vs-OOT tables out below the train-vs-test block.
        title1 = pd.DataFrame(columns=["2.训练&验证"])
        title1.to_excel(
            table, sheet_name="7.模型稳定性评估", index=False, startrow=row_number
        )
        title2_1.to_excel(
            table, sheet_name="7.模型稳定性评估", index=False, startrow=row_number + 1
        )
        title2_2.to_excel(
            table,
            sheet_name="7.模型稳定性评估",
            index=False,
            startrow=row_number + 1,
            startcol=12,
        )

        sheet7_1.to_excel(table, sheet_name="7.模型稳定性评估", startrow=row_number + 3)
        sheet7_2.to_excel(
            table, sheet_name="7.模型稳定性评估", startrow=row_number + 3, startcol=12
        )
    except:
        pass
    # -------------------------------------------------------------------------------------------------------------------------
    # 8. Variable importance: LightGBM importances joined with IV and
    # per-feature missing rate.
    feature_imp = pd.Series(model.feature_importances_)
    feature_name = pd.Series(model.booster_.feature_name())
    feature_df = pd.DataFrame({'feature_name': feature_name, 'element': feature_imp})
    feature_df.sort_values('element', ascending=False, inplace=True)
    feature_df.set_index('feature_name', drop=True, inplace=True)
    # IV computation can fail (e.g. on unsupported dtypes); fall back to an
    # empty template sheet in that case.
    try:
        iv = sc.iv(data_total[model.booster_.feature_name()+['flagy']],'flagy')
        missing_rate = data_total[model.booster_.feature_name()].apply(lambda x: x.isna().sum() / x.shape[0])
        iv.set_index('variable', drop=True, inplace=True)
        missing_rate = pd.DataFrame(missing_rate, columns=['缺失值占比'])

        sheet_8 = pd.concat([feature_df, iv, missing_rate], axis=1)
        sheet_8 = sheet_8.reset_index()
        sheet_8.columns = ['变量','重要性','IV值','缺失值占比']
        sheet_8["序号"] = list(range(1, sheet_8.shape[0] + 1))
        sheet_8["解释"] = ""
        sheet_8 = sheet_8[["序号", "变量", "解释", "重要性", "IV值", "缺失值占比"]]
    except:
        sheet_8 = pd.DataFrame(columns=["序号", "变量", "解释", "重要性", "IV值", "缺失值占比"])

    head.to_excel(table, sheet_name="8.变量重要性", index=False)
    # NOTE(review): `title` here is still the sheet-6 heading frame
    # ("模型区分度评估") defined above — confirm that heading is intended here.
    title.to_excel(table, sheet_name="8.变量重要性", index=False, startrow=1)
    sheet_8.to_excel(table, sheet_name="8.变量重要性", index=False, startrow=2)
    # -------------------------------------------------------------------------------------------------------------------------
    # 9. Variable trend: WOE binning tables/plots for the top_n features.
    import matplotlib.pyplot as plt  # NOTE(review): mid-function import
    title = pd.DataFrame(columns = ["重要变量风险表现"])
    var = feature_df.index.tolist()[:top_n]
    data_bins = sc.woebin(data_total[var + [y]],y)
    sheet_9 = pd.concat(data_bins, ignore_index=True)[["variable", "bin","count", "count_distr", "bad", "badprob"]]
    title1 = pd.DataFrame(columns = ["序号","变量","分箱","区间数量","区间占比","区间坏客数","区间坏客率"])
    head.to_excel(table, sheet_name="9.变量趋势", index=False)
    title.to_excel(table, sheet_name="9.变量趋势", index=False, startrow=1)
    title1.to_excel(table, sheet_name="9.变量趋势", index=False, startrow=2)
    sheet_9.to_excel(
        table, sheet_name="9.变量趋势", index=False, startrow=3, startcol=1, header=False
    )
    # NOTE(review): assumes the xlsxwriter engine, where book.sheetnames maps
    # sheet name -> worksheet object; openpyxl exposes a list here.
    sheet = table.book.sheetnames["9.变量趋势"]

    # One binning plot per variable, saved to PNG and embedded in column J.
    bin_pict = sc.woebin_plot(data_bins)
    i = 3
    for pict in list(sheet_9.variable.unique()):
        bin_pict[pict].savefig(pict + ".png", bbox_inches="tight")
        sheet.insert_image(
            "J" + str(i), pict + ".png", {"x_scale": 0.6, "y_scale": 0.6}
        )
        i = i + 12  # 12 worksheet rows per embedded image
    # -------------------------------------------------------------------------------------------------------------------------
    # 10. Score distribution of the samples: equal-width bins first, then
    # equal-frequency bins, each for train+test / train / test / optional OOT.
    title1 = pd.DataFrame(columns=["1、等高分布"])
    title2 = pd.DataFrame(columns=["分数整体分布情况-训练集"])
    title3 = pd.DataFrame(columns=["分数整体分布情况-测试集"])
    title4 = pd.DataFrame(columns=["分数整体分布情况-验证集"])
    title5 = pd.DataFrame(columns=["分数整体分布情况-训练集+测试集"])
    head.to_excel(table, sheet_name="10.样本风险评分分布", index=False)
    title1.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=1)
    title2.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=2)
    title3.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=19)
    title5.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=2,startcol=11)
    title5.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=55,startcol=11)
    # Equal-width bins ---------
    # Train + test combined.
    score_total = pd.concat([score_train,score_test],axis=0,sort=False)
    df = score_total[score_total[y] != grey]
    # Clamp into [score_range[0], score_range[1] - 1] so every row lands
    # inside a right-open bin.
    df.loc[df.score < score_range[0], "score"] = score_range[0]
    df.loc[df.score >= score_range[1], "score"] = score_range[1] - 1
    df["score"] = pd.cut(df.score, bins=range(score_range[0], score_range[1] + 1, tick),right=False)
    df = df.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    df = df.fillna(0)
    df["区间人数"] = df[0] + df[1]
    df["区间占比"] = df["区间人数"] / df["区间人数"].sum()
    df["区间坏客户率"] = df[1] / df["区间人数"]
    df["累计坏客户占比"] = df[1].cumsum() / df[1].sum()
    df["累计好客户占比"] = df[0].cumsum() / df[0].sum()
    # KS per bin = cumulative bad share minus cumulative good share.
    df["好坏区分程度(ks)"] = df["累计坏客户占比"] - df["累计好客户占比"]
    df.reset_index(inplace=True)
    df.rename(columns={"score": "评分区间"}, inplace=True)
    del df[0], df[1]
    df.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=3,startcol = 11)
    # Training set.
    df = score_train[score_train[y] != grey]
    df.loc[df.score < score_range[0], "score"] = score_range[0]
    df.loc[df.score >= score_range[1], "score"] = score_range[1] - 1
    df["score"] = pd.cut(
        df.score, bins=range(score_range[0], score_range[1] + 1, tick), right=False
    )
    df = df.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    df = df.fillna(0)
    df["区间人数"] = df[0] + df[1]
    df["区间占比"] = df["区间人数"] / df["区间人数"].sum()
    df["区间坏客户率"] = df[1] / df["区间人数"]
    df["累计坏客户占比"] = df[1].cumsum() / df[1].sum()
    df["累计好客户占比"] = df[0].cumsum() / df[0].sum()
    df["好坏区分程度(ks)"] = df["累计坏客户占比"] - df["累计好客户占比"]
    df.reset_index(inplace=True)
    df.rename(columns={"score": "评分区间"}, inplace=True)
    del df[0], df[1]
    df.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=3)
    # Test set.
    df = score_test[score_test[y] != grey]
    df.loc[df.score < score_range[0], "score"] = score_range[0]
    df.loc[df.score >= score_range[1], "score"] = score_range[1] - 1
    df["score"] = pd.cut(
        df.score, bins=range(score_range[0], score_range[1] + 1, tick), right=False
    )
    df = df.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    df = df.fillna(0)
    df["区间人数"] = df[0] + df[1]
    df["区间占比"] = df["区间人数"] / df["区间人数"].sum()
    df["区间坏客户率"] = df[1] / df["区间人数"]
    df["累计坏客户占比"] = df[1].cumsum() / df[1].sum()
    df["累计好客户占比"] = df[0].cumsum() / df[0].sum()
    df["好坏区分程度(ks)"] = df["累计坏客户占比"] - df["累计好客户占比"]
    df.reset_index(inplace=True)
    df.rename(columns={"score": "评分区间"}, inplace=True)
    del df[0], df[1]
    df.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=20)
    # Equal-frequency bins ---------
    # NOTE(review): the heading string says "1、等频分布" although this is the
    # second section — likely meant to read "2、"; confirm before changing.
    title1 = pd.DataFrame(columns=["1、等频分布"])
    title1.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=54)
    title2.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=55)
    title3.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=77)
    # Train + test combined: percentile bin edges fitted on the combined
    # scores; NOTE these `breaks` overwrite the sheet-7 ones and are reused
    # for the train / test / OOT tables below.
    score_total = pd.concat([score_train,score_test],axis=0,sort=False)
    df = score_total[score_total[y] != grey]
    df.sort_values(by="score", ascending=True, inplace=True)
    df.loc[df.score < score_range[0], "score"] = score_range[0]
    df.loc[df.score >= score_range[1], "score"] = score_range[1] - 1
    percent_list = list(range(0, 100 + percent, percent))
    breaks = np.percentile(df.score.values, percent_list)
    # De-duplicate edges, then pin the outer edges to the score range.
    breaks = list(set(breaks))
    breaks = sorted(breaks)
    breaks[-1] = score_range[1]
    breaks[0] = score_range[0]
    df["score"] = pd.cut(df.score, bins=breaks, right=False)
    df = df.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    df = df.fillna(0)
    df["区间人数"] = df[0] + df[1]
    df["区间占比"] = df["区间人数"] / df["区间人数"].sum()
    df["区间坏客户率"] = df[1] / df["区间人数"]
    df["累计坏客户占比"] = df[1].cumsum() / df[1].sum()
    df["累计好客户占比"] = df[0].cumsum() / df[0].sum()
    df["好坏区分程度(ks)"] = df["累计坏客户占比"] - df["累计好客户占比"]
    df.reset_index(inplace=True)
    df.rename(columns={"score": "评分区间"}, inplace=True)
    del df[0], df[1]
    df.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=56,startcol = 11)
    # Training set, cut with the combined-sample breaks.
    dt = score_train[score_train[y] != grey]
    dt.sort_values(by="score", ascending=True, inplace=True)
    dt.loc[dt.score < score_range[0], "score"] = score_range[0]
    dt.loc[dt.score >= score_range[1], "score"] = score_range[1] - 1
    dt["score"] = pd.cut(dt.score, bins=breaks, right=False)
    dt = dt.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    dt = dt.fillna(0)
    dt["区间人数"] = dt[0] + dt[1]
    dt["区间占比"] = dt["区间人数"] / dt["区间人数"].sum()
    dt["区间坏客户率"] = dt[1] / dt["区间人数"]
    dt["累计坏客户占比"] = dt[1].cumsum() / dt[1].sum()
    dt["累计好客户占比"] = dt[0].cumsum() / dt[0].sum()
    dt["好坏区分程度(ks)"] = dt["累计坏客户占比"] - dt["累计好客户占比"]
    dt.reset_index(inplace=True)
    dt.rename(columns={"score": "评分区间"}, inplace=True)
    del dt[0], dt[1]
    dt.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=56)
    # Test set, cut with the combined-sample breaks.
    dt = score_test[score_test[y] != grey]
    dt.sort_values(by="score", ascending=True, inplace=True)
    dt.loc[dt.score < score_range[0], "score"] = score_range[0]
    dt.loc[dt.score >= score_range[1], "score"] = score_range[1] - 1
    dt["score"] = pd.cut(dt.score, bins=breaks, right=False)
    dt = dt.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    dt = dt.fillna(0)
    dt["区间人数"] = dt[0] + dt[1]
    dt["区间占比"] = dt["区间人数"] / dt["区间人数"].sum()
    dt["区间坏客户率"] = dt[1] / dt["区间人数"]
    dt["累计坏客户占比"] = dt[1].cumsum() / dt[1].sum()
    dt["累计好客户占比"] = dt[0].cumsum() / dt[0].sum()
    dt["好坏区分程度(ks)"] = dt["累计坏客户占比"] - dt["累计好客户占比"]
    dt.reset_index(inplace=True)
    dt.rename(columns={"score": "评分区间"}, inplace=True)
    del dt[0], dt[1]
    dt.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=78)

    # Optional OOT/validation distributions (skipped when score_oot absent).
    try:
        # Equal-width bins.
        title4.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=37)
        df = score_oot[score_oot[y] != grey]
        df.loc[df.score < score_range[0], "score"] = score_range[0]
        df.loc[df.score >= score_range[1], "score"] = score_range[1] - 1
        df["score"] = pd.cut(
            df.score, bins=range(score_range[0], score_range[1] + 1, tick), right=False
        )
        df = df.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
        df = df.fillna(0)
        df["区间人数"] = df[0] + df[1]
        df["区间占比"] = df["区间人数"] / df["区间人数"].sum()
        df["区间坏客户率"] = df[1] / df["区间人数"]
        df["累计坏客户占比"] = df[1].cumsum() / df[1].sum()
        df["累计好客户占比"] = df[0].cumsum() / df[0].sum()
        df["好坏区分程度(ks)"] = df["累计坏客户占比"] - df["累计好客户占比"]
        df.reset_index(inplace=True)
        df.rename(columns={"score": "评分区间"}, inplace=True)
        del df[0], df[1]
        df.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=38)
        # Equal-frequency bins (same combined-sample breaks).
        title4.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=104)
        dt = score_oot[score_oot[y] != grey]
        dt.sort_values(by="score", ascending=True, inplace=True)
        dt.loc[dt.score < score_range[0], "score"] = score_range[0]
        dt.loc[dt.score >= score_range[1], "score"] = score_range[1] - 1
        dt["score"] = pd.cut(dt.score, bins=breaks, right=False)
        dt = dt.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
        dt = dt.fillna(0)
        dt["区间人数"] = dt[0] + dt[1]
        dt["区间占比"] = dt["区间人数"] / dt["区间人数"].sum()
        dt["区间坏客户率"] = dt[1] / dt["区间人数"]
        dt["累计坏客户占比"] = dt[1].cumsum() / dt[1].sum()
        dt["累计好客户占比"] = dt[0].cumsum() / dt[0].sum()
        dt["好坏区分程度(ks)"] = dt["累计坏客户占比"] - dt["累计好客户占比"]
        dt.reset_index(inplace=True)
        dt.rename(columns={"score": "评分区间"}, inplace=True)
        del dt[0], dt[1]
        dt.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=105)
    except:
        pass
    # -------------------------------------------------------------------------------------------------------------------------
    # 11. Score decision table (cut-off analysis), equal-width section.
    head.to_excel(table, sheet_name="11.评分决策表", index=False)
    title1 = pd.DataFrame(columns=["1、等高"])
    title2 = pd.DataFrame(columns=["2、等频"])
    title3 = pd.DataFrame(columns=["评分决策表"])
    # Equal-width bins.
    title1.to_excel(table, sheet_name="11.评分决策表", index=False, startrow=1)
    title3.to_excel(table, sheet_name="11.评分决策表", index=False, startrow=2)

    # Score the FULL sample with the final variable list.
    total_class_pred = model.predict_proba(data_total[var_final])[:,1]
    score_total = np.around(A - B * np.log(total_class_pred/(1 - total_class_pred)))
    score_total = pd.DataFrame(score_total,index = data_total.index)
    score_total = pd.concat([data_total['flagy'],score_total],axis = 1).rename(columns = {0:'score'})
    # score_total = pd.concat([score_train, score_test], axis=0, sort=False)

    # Clamp into [score_range[0], score_range[1] - 1] so every row lands
    # inside a right-open bin.
    score_total.loc[score_total.score < score_range[0], "score"] = score_range[0]
    score_total.loc[score_total.score >= score_range[1], "score"] = score_range[1] - 1
    score_total["score"] = pd.cut(
        score_total.score,
        bins=range(score_range[0], score_range[1] + 1, tick),
        right=False,
    )
    score_total = (
        score_total.groupby(by=["score", "flagy"])
        .size()
        .unstack(level=[1], fill_value=0)
    )
    score_total = score_total.fillna(0)
    # Descending order: the table reads from the highest score band downward,
    # so the cumulative columns are approval (pass-through) statistics.
    score_total.sort_index(ascending=False, inplace=True)
    score_total["好"] = score_total[0]
    # The grey-label count column only exists when grey samples are present.
    try:
        score_total["灰"] = score_total[grey]
    except:
        print("Warning: No Grey sample!")
    score_total["坏"] = score_total[1]
    try:
        score_total["总"] = score_total[0] + score_total[1] + score_total[grey]
    except:
        score_total["总"] = score_total[0] + score_total[1]
    score_total["坏累计"] = score_total[1].cumsum()
    score_total["总累计"] = score_total["总"].cumsum()
    score_total["通过率"] = score_total["总累计"] / score_total["总"].sum()
    score_total["每段违约率"] = score_total["坏"] / score_total["总"]
    score_total["平均违约率"] = score_total["坏"].sum() / score_total["总"].sum()
    score_total["通过违约率"] = score_total["坏累计"] / score_total["总累计"]
    # Default rate among rejected (below-cut-off) customers; the last row is
    # 0/0 and therefore NaN.
    score_total["拒绝违约率"] = (score_total["坏"].sum() - score_total["坏累计"]) / (
        score_total["总"].sum() - score_total["总累计"]
    )
    score_total["违约率下降"] = 1 - score_total["通过违约率"] / score_total["平均违约率"]
    score_total.reset_index(inplace=True)
    score_total.rename(columns={"score": "评分区间"}, inplace=True)
    del score_total[1], score_total[0]
    score_total.to_excel(table, sheet_name="11.评分决策表", index=False, startrow=3)
    # 等频
    title2.to_excel(table, sheet_name="11.评分决策表", index=False, startrow=25)
    title3.to_excel(table, sheet_name="11.评分决策表", index=False, startrow=26)
    
    total_class_pred = model.predict_proba(data_total[var_final])[:,1]
    score_total = np.around(A - B * np.log(total_class_pred/(1 - total_class_pred)))
    score_total = pd.DataFrame(score_total,index = data_total.index)
    score_total = pd.concat([data_total['flagy'],score_total],axis = 1).rename(columns = {0:'score'})
    #score_total = pd.concat([score_train, score_test], axis=0, sort=False)

    score_total.loc[score_total.score < score_range[0], "score"] = score_range[0]
    score_total.loc[score_total.score >= score_range[1], "score"] = score_range[1]
    percent_list = list(range(0, 100 + percent, percent))
    breaks = np.percentile(score_total.score.values, percent_list)
    breaks = list(set(breaks))
    breaks = sorted(breaks)
    breaks[-1] = score_range[1]
    breaks[0] = score_range[0]
    score_total["score"] = pd.cut(score_total.score, bins=breaks, right=False)
    score_total = (
        score_total.groupby(by=["score", "flagy"])
        .size()
        .unstack(level=[1], fill_value=0)
    )
    score_total = score_total.fillna(0)
    score_total.sort_index(ascending=False, inplace=True)
    score_total["好"] = score_total[0]
    try:
        score_total["灰"] = score_total[grey]
    except:
        print("Warning: No Grey sample!")
    score_total["坏"] = score_total[1]
    try:
        score_total["总"] = score_total[0] + score_total[1] + score_total[grey]
    except:
        score_total["总"] = score_total[0] + score_total[1]
    score_total["坏累计"] = score_total[1].cumsum()
    score_total["总累计"] = score_total["总"].cumsum()
    score_total["通过率"] = score_total["总累计"] / score_total["总"].sum()
    score_total["每段违约率"] = score_total["坏"] / score_total["总"]
    score_total["平均违约率"] = score_total["坏"].sum() / score_total["总"].sum()
    score_total["通过违约率"] = score_total["坏累计"] / score_total["总累计"]
    score_total["拒绝违约率"] = (score_total["坏"].sum() - score_total["坏累计"]) / (
        score_total["总"].sum() - score_total["总累计"]
    )
    score_total["违约率下降"] = 1 - score_total["通过违约率"] / score_total["平均违约率"]
    score_total.reset_index(inplace=True)
    score_total.rename(columns={"score": "评分区间"}, inplace=True)
    del score_total[1], score_total[0]
    score_total.to_excel(table, sheet_name="11.评分决策表", index=False, startrow=27)
    table.save()    
    
    return 1

# 1. Preprocessing ---------------------------------------------------------------------------------------------------------------------
os.getcwd() # check the current working directory (return value unused)

# NOTE(review): '.csv' is a placeholder path — fill in the real file name.
# skiprows=[1] drops the second physical row (e.g. a Chinese header line).
df = pd.read_csv('.csv',index_col = 'cus_num',low_memory = False,header = 0,skiprows = [1]) # load the raw data

df.rename(columns = {'other_var1':'flagy'},inplace = True) # rename the target column to 'flagy'

# Bad-customer rate: percentage of rows with flagy == 1.
badper = sum(df['flagy'] == 1)/len(df['flagy']) * 100
print(f'{badper}%')


# Unstable variables (kept for reference):
# col = [i for i in data_thin.columns if re.search('inteday|amount|passnum|neworgnum|newallnum|week|night|time|allorgnum|oth|else|tot_mons|orgType|flag_|rc_',i)]

# 2. Coarse variable screening ---------------------------------------------------------------------------------------------------------------------
## 2.1 Drop variables with excessive missing / constant values ----------------------------------------------------------------------------------------------------------
# NOTE(review): missing_identity_select is a project helper defined elsewhere;
# kp_vars are columns to keep regardless of the thresholds.
data = missing_identity_select(df, 'flagy', missing_rate=0.9, identity_rate=0.9, kp_vars=['user_date'])
# data = delete_corelation(data, 'flagy', y_cor=0.0, x_cor=0.95, kp_vars=['user_date'])
data.shape

## 2.2 Train/test split ------------------------------------------------------------------------------------------------------------------
from sklearn.model_selection import train_test_split

Y = data['flagy']
X = data.drop(columns=['flagy'])

# X_transform, transfrom_rule = cate_var_transform(X, Y, kp_vars = ['user_date', 'id', 'cell'])
# Stratified 70/30 split keeps the bad rate equal across both sets.
data_train_x, data_test_x, data_train_y, data_test_y = train_test_split(X, Y, test_size=0.3, random_state=123, stratify=Y)
# Assemble the modelling datasets (identifier/date columns removed).
Y_train = data_train_y
X_train = data_train_x.drop(columns = ['name','sl_user_date','user_date','id','cell','swift_number'])
Y_test = data_test_y
X_test = data_test_x.drop(columns = ['name','sl_user_date','user_date','id','cell','swift_number'])
# Convert to LightGBM's Dataset format (unit sample weights;
# free_raw_data=False keeps the underlying frames accessible).
W_train = np.ones(X_train.shape[0])
W_test = np.ones(X_test.shape[0])
lgb_train = lgb.Dataset(X_train,Y_train,weight = W_train,free_raw_data = False)
lgb_eval = lgb.Dataset(X_test,Y_test,reference = lgb_train,weight = W_test,free_raw_data = False)

## 2.3 Cast object columns to the pandas 'category' dtype ---------------------------------------------------------------
# NOTE(review): the lgb.Dataset objects above were built BEFORE this dtype
# conversion — confirm the Datasets actually see the categorical encoding,
# or move this conversion before the Dataset construction.
object_vars = X_train.select_dtypes(include='O').columns.tolist()
X_train[object_vars] = X_train[object_vars].astype('category')
X_test[object_vars] = X_test[object_vars].astype('category')

# Alternatively, drop the categorical variables instead:
# X_train.drop(columns=object_vars, inplace=True)
# X_test.drop(columns=object_vars, inplace=True)

# 3. Model tuning ---------------------------------------------------------------------------------------------------------------------
## 3.1 Random search -------------------------------------------------------------------------------------------------------------------

### Hyperparameter-search configuration
MAX_EVALS = 50  # number of random candidates to evaluate
N_FOLDS = 5     # cross-validation folds
# NOTE(review): mid-file imports; conventionally these belong at the top.
import csv
from hyperopt import STATUS_OK
from timeit import default_timer as timer

### Objective function
import random
def random_objective(params, iteration, n_folds = N_FOLDS,loss_type='auc_train'):
    """Random-search objective.

    Runs LightGBM cross-validation with the sampled hyperparameters and
    returns ``[loss, params, iteration, n_estimators, elapsed_seconds]``,
    where ``loss = 1 - best mean CV AUC`` (lower is better).

    Note: ``loss_type`` is accepted for interface compatibility but unused.
    Reads the module-level ``lgb_train`` Dataset.
    """
    t0 = timer()
    # n-fold CV on the global training Dataset, scored by AUC.
    cv_results = lgb.cv(params, lgb_train, num_boost_round = 100, nfold = n_folds,
                        early_stopping_rounds = 100, metrics = 'auc', seed = 50)
    elapsed = timer() - t0

    auc_curve = cv_results['auc-mean']
    # Hyperopt-style convention: minimize 1 - AUC.
    loss = 1 - np.max(auc_curve)
    # Boosting round (1-based) at which the best mean AUC was reached.
    n_estimators = int(np.argmax(auc_curve) + 1)

    return [loss, params, iteration, n_estimators, elapsed]

### Search space for the random search
# Hyperparameter grid
param_grid = {
     'class_weight': [None, 'balanced'],
     'boosting_type': ['gbdt', 'goss', 'dart'],
     'num_leaves': list(range(30, 150)),
     'learning_rate': list(np.logspace(np.log(0.005), np.log(0.2), base = np.exp(1), num = 1000)),
     'subsample_for_bin': list(range(20000, 300000, 20000)),
     'min_child_samples': list(range(20, 500, 5)),
     'reg_alpha': list(np.linspace(0, 1)),
     'reg_lambda': list(np.linspace(0, 1)),
     'colsample_bytree': list(np.linspace(0.6, 1, 10))
}

 # Subsampling (only applicable with 'gbdt' and 'dart')
subsample_dist = list(np.linspace(0.5, 1, 100))
# Pre-allocated result table: one row per evaluation.
random_results = pd.DataFrame(columns = ['loss', 'params', 'iteration', 'estimators', 'time'],
                        index = list(range(MAX_EVALS)))

### Run the search
random.seed(50)  # reproducible draws

# Iterate through the specified number of evaluations
for i in range(MAX_EVALS):
    # Draw one value per hyperparameter from the grid.
    params={key: random.sample(value, 1)[0] for key,value in param_grid.items()}    
    # print(params)
    # Randomly sample parameters for gbm

    if params['boosting_type'] == 'goss':
        # Cannot subsample with goss
        params['subsample'] = 1.0
    else:
        # Subsample supported for gdbt and dart
        params['subsample'] = random.sample(subsample_dist, 1)[0]        
    results_list = random_objective(params, i)
    
     # Add results to next row in dataframe
    random_results.loc[i, :] = results_list
# Sort evaluations by loss, best first.
random_results.sort_values('loss', ascending = True, inplace = True)
random_results.reset_index(inplace = True, drop = True)

# Best hyperparameters found by the random search.
best_random_params = random_results.loc[0, 'params'].copy()
best_random_estimators = int(random_results.loc[0, 'estimators'])

## 3.2 Bayesian tuning (hyperopt, Tree-structured Parzen Estimator) ------------------------------------------------------------

# Hyperparameter optimisation via Bayesian search (TPE).
# The objective below encodes one of several possible loss designs; different losses
# converge at different speeds — designing a loss that reaches the optimum quickly
# is still an open question for this pipeline.
from hyperopt import STATUS_OK
from timeit import default_timer as timer
MAX_EVALS = 100
N_FOLDS = 5
###定义超参数优化的目标函数
def objective(params, n_folds = N_FOLDS):
    """Hyperopt objective: fit a LightGBM classifier with ``params`` and
    return a loss that penalises both a low test AUC and a large
    train/test AUC gap.

    Increments the module-level ITERATION counter. ``n_folds`` is kept for
    interface compatibility (the evaluation is a single fit, not CV).
    Reads the module-level X_train/Y_train/X_test/Y_test.
    """
    global ITERATION
    ITERATION += 1

    # hp.choice yields a nested dict for boosting_type; flatten it and pull
    # out bagging_fraction (defaults to 1.0 when absent, e.g. for 'goss').
    boosting_conf = params['boosting_type']
    params['boosting_type'] = boosting_conf['boosting_type']
    params['bagging_fraction'] = boosting_conf.get('bagging_fraction', 1.0)

    # Integer-valued parameters come out of hp.uniform as floats.
    for name in ('num_leaves', 'bagging_freq', 'min_data_in_leaf', 'max_bin', 'max_depth', 'n_estimators'):
        params[name] = int(params[name])
    # A depth-limited tree can never have more than 2**max_depth leaves.
    params['num_leaves'] = min(params['num_leaves'], 2 ** params['max_depth'])
    for name in ('learning_rate', 'bagging_fraction', 'feature_fraction', 'reg_lambda', 'reg_alpha'):
        params[name] = round(params[name], 4)

    start = timer()
    clf = lgb.LGBMClassifier(n_jobs = -1,
                             objective = 'binary', random_state = 50, **params)
    clf.fit(X_train, Y_train)
    auc_train = roc_auc_score(Y_train, clf.predict_proba(X_train)[:, 1])
    auc_test = roc_auc_score(Y_test, clf.predict_proba(X_test)[:, 1])
    run_time = timer() - start

    # Relative train/test gap minus the test AUC itself, so both
    # overfitting and weak test performance push the loss up.
    loss = abs(auc_train - auc_test) / auc_test - auc_test

    return {'loss': loss, 'params': params, 'iteration': ITERATION,
            'train_time': run_time, 'status': STATUS_OK}

### Search space definition

'''
hp.choice(label, options): returns one of the options (list or tuple); options may be
    nested expressions, which is how conditional parameters are built.
hp.pchoice(label, p_options): returns one option with a given probability, so the
    search visits the options non-uniformly.
hp.uniform(label, low, high): uniform on [low, high].
hp.quniform(label, low, high, q): round(uniform(low, high) / q) * q, for discrete values.
hp.loguniform(label, low, high): exp(uniform(low, high)), i.e. range [exp(low), exp(high)].
hp.randint(label, upper): random integer in the half-open range [0, upper).
'''
from hyperopt import hp
from hyperopt.pyll.stochastic import sample
space = {
#     'boosting_type': hp.choice('boosting_type', [{'boosting_type': 'gbdt', 'bagging_fraction': hp.uniform('gbdt_bagging_fraction', 0.5, 1)}, 
#                                                  {'boosting_type': 'dart', 'bagging_fraction': hp.uniform('dart_bagging_fraction', 0.5, 1)},
#                                                  {'boosting_type': 'goss', 'bagging_fraction': 1.0}]),
    'boosting_type': hp.choice('boosting_type', [{'boosting_type': 'gbdt', 'bagging_fraction': hp.uniform('gbdt_bagging_fraction', 0.5, 1)}]),
    'num_leaves': hp.quniform('num_leaves', 4, 32, 1),
    'learning_rate': hp.loguniform('learning_rate', np.log(0.01), np.log(0.2)),
    'bagging_freq': hp.uniform('bagging_freq', 0, 5),
    'feature_fraction': hp.uniform('feature_fraction', 0.5, 1),
    'min_data_in_leaf': hp.uniform('min_data_in_leaf', 10, 100),
    'max_bin': hp.uniform('max_bin', 4, 20),
    'reg_alpha': hp.uniform('reg_alpha', 0.0, 1.0),
    'reg_lambda': hp.uniform('reg_lambda', 0.0, 1.0),
    'importance_type': 'gain',
    'max_depth': hp.uniform('max_depth', 2, 6),
    'n_estimators': hp.uniform('n_estimators', 50, 200),
#     'min_gain_to_split': hp.uniform('n_estimators', 0, 100),
    #     'subsample_for_bin': hp.quniform('subsample_for_bin', 20000, 300000, 20000),
#     'min_child_samples': hp.quniform('min_child_samples', 20, 500, 5),
#     'reg_alpha': hp.uniform('reg_alpha', 0.0, 1.0),
#     'reg_lambda': hp.uniform('reg_lambda', 0.0, 1.0),
#     'colsample_bytree': hp.uniform('colsample_by_tree', 0.6, 1.0)
}

# TPE suggestion algorithm
from hyperopt import tpe
# Trials object records every evaluation
from hyperopt import Trials
# Keep track of results
bayes_trials = Trials()
# Optionally persist intermediate results to CSV
# out_file = 'results/gbm_trials.csv'
# of_connection = open(out_file, 'w')
# writer = csv.writer(of_connection)

# # Write the headers to the file
# writer.writerow(['loss', 'params', 'iteration', 'estimators', 'train_time'])
# of_connection.close()

### Run the optimisation
from hyperopt import fmin
# Global variable
global  ITERATION

ITERATION = 0

# Run optimization
# NOTE(review): recent hyperopt versions expect rstate=np.random.default_rng(123);
# confirm against the installed hyperopt version.
best = fmin(fn = objective, space = space, algo = tpe.suggest, 
            max_evals = MAX_EVALS, trials = bayes_trials, rstate = np.random.RandomState(123))
# Sort the trials with lowest loss (highest AUC) first
bayes_trials_results = sorted(bayes_trials.results, key = lambda x: x['loss'])
bayes_trials_results[:2]
# Results (optionally reload from the CSV dump)
# results = pd.read_csv('d:/gbm_trials.csv')
# Sort with best scores on top and reset index for slicing
# results.sort_values('loss', ascending = True, inplace = True)
# results.reset_index(inplace = True, drop = True)
# results.head()
# import ast
# Convert from a string to a dictionary
# ast.literal_eval(bayes_trials_results.loc[0, 'params'])
# Best hyperparameters from the Bayesian search
# best_bayes_estimators = int(bayes_trials_results[0]['estimators'])
best_bayes_params = bayes_trials_results[0]['params']
# Re-create the best model and train on the training data

best_bayes_params

################################## Manually adjusted parameters #####################################
# Hand-tuned configuration that overrides the Bayesian-search result above.
best_bayes_params = {'bagging_fraction': 0.7431,
 'bagging_freq': 0,
 'boosting_type': 'gbdt',
 'feature_fraction': 0.7352,
 'importance_type': 'gain',
 'learning_rate': 0.0105,
 'max_bin': 10,
 'max_depth': 2,
 'min_data_in_leaf': 24,
 'n_estimators': 92,
 'num_leaves': 3,
 'reg_alpha': 0.3853,
 'reg_lambda': 0.3642}
# 'min_gain_to_split': 100

################################## Train the model with the chosen hyperparameters #####################################
print('Starting training...')
start_time = time.time()
best_bayes_model = lgb.LGBMClassifier(n_jobs = -1, 
                                       objective = 'binary', random_state = 50, **best_bayes_params)
best_bayes_model.fit(X_train, Y_train, eval_metric='auc', verbose=2)
# gbm.fit(X_train, Y_train)
# gbm = lgb.train(params, lgb_train, num_boost_round=10, feature_name = 'auto')
print('train over')
end_time = time.time()
print('训练时间为：', end_time - start_time)

################################## Model performance #####################################
# get the predicted class
# bst = lgb.Booster(model_file='model.txt')

train_class_pred = best_bayes_model.predict_proba(X_train)[:, 1]
test_class_pred = best_bayes_model.predict_proba(X_test)[:, 1]

from sklearn.metrics import roc_curve
# KS statistic = max(TPR - FPR) over all thresholds.
fpr_train, tpr_train, thresholds_test = roc_curve(np.array(Y_train), train_class_pred)
fpr_test, tpr_test, thresholds_test = roc_curve(np.array(Y_test), test_class_pred)
# df = pd.DataFrame({'FPR': fpr_test, 'TPR': tpr_test, 'Thresholds': thresholds_test})
# print(df)
ks_train = max(tpr_train - fpr_train)
ks_test = max(tpr_test - fpr_test)
print("KS(Train): %f" % ks_train)
print("KS(Test): %f" % ks_test)

# get the auc
from sklearn.metrics import roc_auc_score
print("AUC Score(Train): %f" % roc_auc_score(Y_train, train_class_pred))
print("AUC Score(Test): %f" % roc_auc_score(Y_test, test_class_pred))

################################## Feature Importance #####################################
# Keep only features with positive (gain) importance for the next stage.
feature_imp = pd.Series(best_bayes_model.feature_importances_)
# FIX: use the public `booster_` accessor instead of the private `_Booster` attribute.
feature_name = best_bayes_model.booster_.feature_name()
feature_df = pd.DataFrame({'feature_name': feature_name, 'element': feature_imp})
feature_df = feature_df[feature_df['element'] > 0]
print(feature_df)

# 4. Feature selection ---------------------------------------------------------------------------------------------------------

## 4.1 PSI-based selection -----------------------------------------------------------------------------------------------------
# Restrict to the features the model actually used, then flag numeric features
# whose train/test PSI is unstable (psi_var is a project helper).
X_train1 = X_train[feature_df.feature_name.tolist()]
X_test1 = X_test[feature_df.feature_name.tolist()]
#oot_X = oot_X[feature_df.feature_name.tolist()]
cat_var = X_train1.select_dtypes('category').columns.tolist()
#drop_vars1 = psi_var(X_train1, oot_X)
drop_vars2 = psi_var(X_train1.drop(columns = cat_var), X_test1.drop(columns = cat_var))
drop_vars = list(set(drop_vars2))#| set(drop_vars1))

## 4.2 Selection by feature importance -----------------------------------------------------------------------------------------

# Parameter set used during the iterative re-fits below.
best_bayes_params = {'bagging_fraction': 0.7431,
 'bagging_freq': 0,
 'boosting_type': 'gbdt',
 'feature_fraction': 0.7352,
 'importance_type': 'gain',
 'learning_rate': 0.0105,
 'max_bin': 10,
 'max_depth': 2,
 'min_data_in_leaf': 24,
 'n_estimators': 92,
 'num_leaves': 3,
 'reg_alpha': 0.3853,
 'reg_lambda': 0.3642}

# Iterative pruning: refit and drop features with feature_importance == 0 until none remain.
# NOTE(review): this reset discards the PSI-based drop_vars computed in section 4.1 —
# presumably a notebook toggle; confirm whether the PSI drops should be applied here.
drop_vars = []
X_train_final = X_train1.drop(columns = drop_vars)
while True:
    X_test_final = X_test[X_train_final.columns]
    num_train, num_feature = X_train_final.shape
    W_train = np.ones(X_train_final.shape[0])
    W_test = np.ones(X_test_final.shape[0])

    # create dataset for lightgbm
    # if you want to re-use data, remember to set free_raw_data = False
    lgb_train = lgb.Dataset(X_train_final, Y_train, weight=W_train, free_raw_data=False)
    lgb_eval = lgb.Dataset(X_test_final, Y_test, reference=lgb_train, weight=W_test, free_raw_data=False)

    # import xgboost as xgb

    print('Starting training...')
    start_time = time.time()
    print('training...')

    # gbm.fit(X_train, Y_train)
    gbm = lgb.LGBMClassifier(n_jobs = -1, 
                                           objective = 'binary', random_state = 50, **best_bayes_params)
    gbm.fit(X_train_final, Y_train)
    print('train over')
    end_time = time.time()
    feature_imp = pd.Series(gbm.feature_importances_)
    feature_name = pd.Series(gbm.booster_.feature_name())
    feature_df = pd.DataFrame({'feature_name': feature_name, 'element': feature_imp})
#     feature_df = feature_df.sort_values(by='element', ascending=False)
    feature_df = feature_df[feature_df['element'] > 0]
    # Stop once every remaining feature has positive importance.
    if len(feature_imp) == len(feature_df):
        break
    else:
        X_train_final = X_train[feature_df.feature_name.tolist()]
        
# joblib.dump('....pkl')
# joblib.dump('....pkl')

################################## Feature Importance #####################################
# Final model's importance table, filtered to features with positive gain importance.
feature_imp = pd.Series(gbm.feature_importances_)
feature_name = pd.Series(gbm.booster_.feature_name())
feature_df = pd.DataFrame({'feature_name': feature_name, 'element': feature_imp})
# feature_df = feature_df.reset_index().drop(columns=['index'])
# feature_df = feature_df.sort_values(by='element', ascending=False)
feature_df = feature_df[feature_df['element'] > 0]
print(feature_df)

################################## Per-variable binning plots #####################################
var = feature_df['feature_name']
feature_importance = feature_df['element']
# BUG FIX: pd.Series(feature_importance, index=var) aligns the Series data by LABEL
# against the new index, and since the feature names do not exist in its integer
# index every value becomes NaN (so the importance ordering was meaningless).
# Pair the importances with the names positionally instead.
var_df = pd.Series(feature_importance.values, index=var.values)
var_df.sort_values(ascending=False, inplace=True)
var = var_df.index.tolist()
data1 = data[var + ['flagy']]

# WOE binning on the train / test partitions (rows selected by original index).
train_bins = sc.woebin(data1.loc[X_train.index,:], 'flagy')
test_bins = sc.woebin(data1.loc[X_test.index,:], 'flagy')

import matplotlib.pyplot as plt
for col in var:
    p1=sc.woebin_plot(train_bins[col])
    print('train',col)
    plt.show(p1)
    p2=sc.woebin_plot(test_bins[col])
    print('test',col)
    plt.show(p2)

################################## Manually drop variables with abnormal bins #####################################
# Alternative hand-tuned parameters (kept for reference)
# best_bayes_params = {'bagging_freq': 1,
#  'boosting_type': 'gbdt',
#  'feature_fraction': 0.6965,
#  'importance_type': 'gain',
#  'learning_rate': 0.0586,
#  'max_bin': 10,
#  'max_depth': 3,
#  'min_data_in_leaf': 58,
#  'n_estimators': 133,
#  'num_leaves': 4,
#  'reg_alpha': 0.6507,
#  'reg_lambda': 0.2647,
#  'bagging_fraction': 0.7629}

# Variables dropped manually after inspecting the binning plots.
mannual_drops=[]
# mannual_drops=['flag_profilepopulation','flag_fraudrelation_g','flag_graylist', 'pp_birthyear', 'pp_age', 'pp_gender', 'cf_cons_C13_views']
X_train_final1 = X_train_final.drop(columns=mannual_drops)
X_test_final = X_test[X_train_final1.columns]

# Retrain on the post-drop feature set.
# BUG FIX: the original fitted on X_train_final while X_test_final was built from
# X_train_final1's columns, so train and test features would disagree whenever
# mannual_drops is non-empty. Fit on X_train_final1 (identical when no drops).
start_time = time.time()
# gbm.fit(X_train, Y_train)
gbm = lgb.LGBMClassifier(n_jobs = -1, 
                                       objective = 'binary', random_state = 50, **best_bayes_params)
gbm.fit(X_train_final1, Y_train)

import joblib
# joblib.dump(gbm, './机器学习模型/cons_bank_lgb_model.pkl')

# 5. Model evaluation ----------------------------------------------------------------------------------------------------------
# Score both partitions with the final model and report KS / AUC.
bst = gbm

train_class_pred = bst.predict_proba(X_train_final)[:,1]
test_class_pred = bst.predict_proba(X_test_final)[:,1]

from sklearn.metrics import roc_curve
# NOTE: both calls bind `thresholds_test`; the train thresholds are never kept.
fpr_train, tpr_train, thresholds_test = roc_curve(np.array(Y_train), train_class_pred)
fpr_test, tpr_test, thresholds_test = roc_curve(np.array(Y_test), test_class_pred)

# KS statistic: maximum separation between the TPR and FPR curves.
ks_train = (tpr_train - fpr_train).max()
ks_test = (tpr_test - fpr_test).max()
print("KS(Train): %f" % ks_train)
print("KS(Test): %f" % ks_test)

# AUC on both partitions.
from sklearn.metrics import roc_auc_score
print("AUC Score(Train): %f" % roc_auc_score(Y_train, train_class_pred))
print("AUC Score(Test): %f" % roc_auc_score(Y_test, test_class_pred))

################################ KS plot ##################################
import scorecardpy as sc
train_perf = sc.perf_eva(Y_train, train_class_pred, title="train")
test_perf = sc.perf_eva(Y_test, test_class_pred, title="test")
# train_perf = sc.perf_eva(Y_train, train_class_pred, title="train: ROC", plot_type=['roc'])
# test_perf = sc.perf_eva(Y_test, test_class_pred, title="test: ROC", plot_type=['roc'])

# 6. Persist artefacts ---------------------------------------------------------------------------------------------------------
import joblib

joblib.dump(X_train_final,'X_train_final.pkl')
joblib.dump(X_test_final,'X_test_final.pkl')
joblib.dump(Y_train,'Y_train.pkl')
joblib.dump(Y_test,'Y_test.pkl')
joblib.dump(gbm,'gbm.pkl')
joblib.dump(feature_df,'feature_df.pkl')

# 7. Scoring & distribution ----------------------------------------------------------------------------------------------------
## 7.1 Score and inspect the distribution --------------------------------------------------------------------------------------
############################ Score Distribution ###########################
train_p = train_class_pred.copy()
test_p = test_class_pred.copy()
# whole_p = whole_class_pred.copy()
# Scorecard scaling: 650 points at base odds of 1:19, 120 points to double the odds.
points0 = 650
pdo = 120
odds0 = 1/19
B = pdo / np.log(2)
A = points0 + B * np.log(odds0)
# score = A - B * log(p / (1-p)); higher score = lower predicted probability.
train_score = np.around(A - B * np.log(train_p/(1 - train_p)))
test_score = np.around(A - B * np.log(test_p/(1 - test_p)))
# whole_score = np.around(A + B * np.log(whole_p/(1 - whole_p)))

# Assemble (score, id, y) frames for PSI / distribution checks.
arr1 = np.arange(train_score.shape[0])
train_score = pd.Series(train_score)
s1 = pd.Series(arr1)
train_psi = pd.concat([train_score, s1, Y_train.reset_index(drop=True)], ignore_index=True, axis=1)
train_psi.columns = ['score', 'id', 'y']

arr1 = np.arange(test_score.shape[0])
test_score = pd.Series(test_score)
s1 = pd.Series(arr1)
test_psi = pd.concat([test_score, s1, Y_test.reset_index(drop=True)], ignore_index=True, axis=1)
test_psi.columns = ['score', 'id', 'y']

# arr1 = np.arange(whole_score.shape[0])
# whole_score = pd.Series(whole_score)
# s1 = pd.Series(arr1)
# whole_psi = pd.concat([whole_score, s1, Y], ignore_index=True, axis=1)
# whole_psi.columns = ['score', 'id', 'y']
############################################################################
print(train_score.max(), train_score.min())
# psi_stats_score1(test_psi, train_psi, non_computed=None, plot_image=True)
sc.perf_psi(score = {'train':train_psi[['score']], 'test':test_psi[['score']]},
      label = {'train':train_psi['y'], 'test':test_psi['y']},
      # x_limits = [300, 1000],
      x_tick_break = 10)

## 7.2 For a blended model: export predictions / scores ------------------------------------------------------------------------
# train_score = np.around(A - B * np.log(train_p/(1 - train_p)))
# test_score = np.around(A - B * np.log(test_p/(1 - test_p)))
# train_score = pd.DataFrame(train_score,index = X_train_final.index) # attach original index for joins
# test_score = pd.DataFrame(test_score,index = X_test_final.index) # attach original index for joins
# train_score['pred'] = train_p
# test_score['pred'] = test_p
# score_total = pd.concat([train_score,test_score],axis = 0) # stack train and test
# result = pd.concat([data[['id','cell','user_date','flagy']],score_total],axis = 1) # join key columns with the scores
# result.rename(columns = {0:'score'},inplace = True)
# result.to_csv('union1_pre_score.csv')

## 7.3 Out-of-time validation --------------------------------------------------------------------------------------------------
# NOTE(review): oot_X / oot_Y (the out-of-time sample) must be defined elsewhere before this runs.
model = gbm
features = model.booster_.feature_name()
oot_X = oot_X[features]
oot_pred = model.predict_proba(oot_X)[:, 1]
from sklearn.metrics import roc_curve
fpr_oot, tpr_oot, thresholds_oot = roc_curve(np.array(oot_Y), oot_pred)

ks_oot = max(tpr_oot - fpr_oot)
print("KS(oot): %f" % ks_oot)

from sklearn.metrics import roc_auc_score
print("AUC Score(OOT): %f" % roc_auc_score(oot_Y, oot_pred))
import scorecardpy as sc
oot_perf = sc.perf_eva(oot_Y, oot_pred, title="oot: KS", plot_type=['ks'])
oot_perf = sc.perf_eva(oot_Y, oot_pred, title="oot: ROC", plot_type=['roc'])

## 7.4 Score export keyed by real identifiers (id card, cell number, etc.) -----------------------------------------------------
# Train/test score export (notebook toggle, kept commented):
# train_psi['pred'] = train_p
# test_psi['pred'] = test_p
# del train_psi['id'], test_psi['id']
# train_psi = train_psi.join(data_train_x[['id', 'cell', 'user_date']].reset_index(drop=True))
# test_psi = test_psi.join(data_test_x[['id', 'cell', 'user_date']].reset_index(drop=True))
# train_psi.rename(columns={'y': 'flagy'}, inplace=True)
# test_psi.rename(columns={'y': 'flagy'}, inplace=True)
# train_psi = train_psi[['id', 'cell', 'user_date', 'pred', 'score','flagy']]
# test_psi = test_psi[['id', 'cell', 'user_date', 'pred', 'score','flagy']]
# train_psi.to_csv('./机器学习模型/train_score.csv', encoding='utf_8_sig', index=False)
# test_psi.to_csv('./机器学习模型/test_score.csv', encoding='utf_8_sig', index=False)
# score_all = pd.concat([train_psi, test_psi], axis=0)
# score_all.to_csv('./机器学习模型/total_score.csv', encoding='utf_8_sig')
# OOT score export:
# oot_score = np.around(A - B * np.log(oot_pred/(1 - oot_pred)))
# oot_psi = pd.DataFrame(oot_score, columns=['score'])
# oot_psi['flagy'] = oot_Y.values
# oot_psi['pred'] = oot_pred
# oot_psi = oot_psi.join(oot[['cus_num', 'id', 'cell', 'user_date']].reset_index(drop=True))
# oot_psi = oot_psi[['cus_num', 'id', 'cell', 'user_date', 'pred', 'score','flagy']]
# oot_psi.to_csv('./lgbm模型/oot_score.csv', encoding='utf_8_sig', index=False)

# 8. Data overview -------------------------------------------------------------------------------------------------------------
## 8.0 Auto-generated report
# NOTE(review): report() is a project helper; data_total is not defined in this file section
# and data_train is only created in section 8.3 below — confirm execution order. Also note the
# scorecard parameters here (points0=55, pdo=-10) differ from the 650/120 scaling in 7.1.
report(data_total,
       data_train,
       data_test,
       data_oot=None,
       model=bst,
       y='flagy',
       filename='1',
       points0=55,
       pdo=-10,
       odds0=0.1,
       grey=2,
       score_range=(0, 100),
       tick=10,
       percent=5,
       top_n = 10,
       user_data = 'user_date')
## 8.1 Sample counts and bad counts per month ----------------------------------------------------------------------------------
# NOTE(review): as built in 7.1, train_psi/test_psi only have columns ['score', 'id', 'y'];
# the 'user_date' and 'flagy' columns used below exist only after the (commented) section 7.4
# joins/renames have run — confirm before executing this section.
train_psi['user_date'] = pd.to_datetime(train_psi.user_date)
test_psi['user_date'] = pd.to_datetime(test_psi.user_date)
# oot_psi['user_date'] = pd.to_datetime(oot_psi.user_date)
groupkey = pd.Grouper(key='user_date', freq='m')  # group by calendar month
print(train_psi.groupby(groupkey).size())
print(train_psi.groupby(groupkey)['flagy'].sum())

print(test_psi.groupby(groupkey).size())
print(test_psi.groupby(groupkey)['flagy'].sum())

# print(oot_psi.groupby(groupkey).size())
# print(oot_psi.groupby(groupkey)['flagy'].sum())
## 8.2 Variable trends ---------------------------------------------------------------------------------------------------------
import joblib
# model = joblib.load('./机器学习模型/cons_bank_lgb_model.pkl')
model = gbm
var = model.booster_.feature_name()
# Order variables by importance (feature_importance is an ndarray here, so the
# positional pairing with the name list is safe).
feature_importance = model.booster_.feature_importance()
var_df = pd.Series(feature_importance, index=var)
var_df.sort_values(ascending=False, inplace=True)
var = var_df.index.tolist()
data1 = data[var + ['flagy']]
bins = sc.woebin(data1, 'flagy')
# Record the break points so OOT binning below uses identical cuts.
breaklist = {}
for key in bins:
    breaklist[key] = bins[key]['breaks'].tolist()
# oot.rename(columns={'other_var1': 'flagy'}, inplace=True)
# data2 = oot[var + ['flagy']]
# oot_bins = sc.woebin(data2, 'flagy', breaks_list=breaklist)
for col in var:
    sc.woebin_plot(bins[col])

# OOT variable trends (requires the `oot` frame defined elsewhere).
breaklist = {}
for key in bins:
    breaklist[key] = bins[key]['breaks'].tolist()
oot.rename(columns={'other_var1': 'flagy'}, inplace=True)
data2 = oot[var + ['flagy']]
oot_bins = sc.woebin(data2, 'flagy', breaks_list=breaklist)
for col in var:
    sc.woebin_plot(oot_bins[col])

## 8.3 Feature importance, IV and missing rate ---------------------------------------------------------------------------------
# gbm = joblib.load('./机器学习模型/cons_bank_lgb_model.pkl')
data_train = pd.concat([data_train_x, data_train_y], axis=1)
# Information Value per model feature (scorecardpy), plus per-feature missing rate.
iv = sc.iv(data_train[gbm.booster_.feature_name()+['flagy']],'flagy')
missing_rate = X_train[gbm.booster_.feature_name()].apply(lambda x: x.isna().sum() / x.shape[0])
iv.set_index('variable', drop=True, inplace=True)
missing_rate = pd.DataFrame(missing_rate, columns=['缺失值占比'])

feature_imp = pd.Series(gbm.feature_importances_)
feature_name = pd.Series(gbm.booster_.feature_name())
feature_df = pd.DataFrame({'feature_name': feature_name, 'element': feature_imp})
feature_df.sort_values('element', ascending=False, inplace=True)
feature_df.set_index('feature_name', drop=True, inplace=True)
# Join importance, IV and missing rate on the feature-name index, then export.
output = pd.concat([feature_df, iv, missing_rate], axis=1)
output.to_csv('./机器学习模型/iv_missing_rate.csv', encoding='utf_8_sig')

## 8.4 Per-variable PSI --------------------------------------------------------------------------------------------------------
# psi_var_table is a project helper returning a per-variable PSI table.
data_test = pd.concat([data_test_x, data_test_y], axis=1)
data_train1 = data_train[gbm.booster_.feature_name()]
data_test1 = data_test[gbm.booster_.feature_name()]
# data_oot1 = oot[gbm.booster_.feature_name()]
table = psi_var_table(data_train1,data_test1)
# table.to_csv('./lgbm模型/train_test_psi.csv', encoding='utf_8_sig', index=False)
# table1 = psi_var_table(data_train1, data_oot1)
# table1.to_csv('./lgbm模型/train_oot_psi.csv', encoding='utf_8_sig', index=False)

# 9. SHAP explanations ---------------------------------------------------------------------------------------------------------
import shap
shap.initjs()

model = gbm
var = model.booster_.feature_name()
explainer = shap.TreeExplainer(model)
shap_values = explainer.shap_values(X_train[var])
# Single-sample explanation (sample index 2).
X = X_train[var]
# BUG FIX: the class-1 SHAP values were plotted against the class-0 base value
# (expected_value[0]); use index [1] for both so the force plot is consistent.
# (Assumes this shap version returns per-class lists for binary LightGBM —
# newer shap may return a single array; confirm against the installed version.)
shap.force_plot(explainer.expected_value[1], shap_values[1][2,:], X.iloc[2,:])
# Global feature-importance summary
from matplotlib import pyplot as plt
plt.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['axes.unicode_minus']=False
shap.summary_plot(shap_values[1], X) #,plot_type="bar"
# Single-variable dependence plot
shap.dependence_plot('als_m6_id_nbank_avg_monnum', shap_values[1], X, interaction_index=None, show=False)




# 10. Export the model as code -------------------------------------------------------------------------------------------------
# LGBModelParser is a project-local module that converts the trained booster
# into a standalone Python scoring script.
from model_parser.lgb import LGBModelParser

final_features = model.booster_.feature_name()
mp = LGBModelParser(model, final_features)

with open('ygxb_lgbm.py', 'w', encoding='utf-8') as f:
    f.write(mp.parse())



