# -*- coding: utf-8 -*-
"""
Logistic建模用到的函数

Created on Dec 2021

Author@chang.lu
"""
import pandas as pd
import numpy as np
import os
import scorecardpy as sc
import re
import statsmodels.api as sm
import statsmodels.stats.outliers_influence as oi
import datetime as dtt
from sklearn.linear_model import LogisticRegression
import joblib
from joblib import Parallel, delayed
from matplotlib import pyplot as plt
from sklearn import tree

def flag_hit(dat):
    """Report, for every flag_* column, how many rows matched and the match rate.

    Values 98/99 are treated as "no match" and recoded to 0 first — note this
    recodes the columns of `dat` in place.
    Returns a DataFrame indexed by flag column with '匹配数量' and '匹配率'.
    """
    hit_cols = [c for c in dat.columns if re.match('flag_', c) is not None]
    for c in hit_cols:
        dat[c] = dat[c].replace([98, 99], 0, regex=True)
    n_rows = len(dat)
    # value_counts per column, transposed so rows are flag columns; [1] = count of hits
    hits = dat[hit_cols].apply(lambda s: s.value_counts()).T[1]
    hit_rates = hits / n_rows
    result = pd.DataFrame(index=hits.index)
    result['匹配数量'] = hits.tolist()
    result['匹配率'] = ['{:.2%}'.format(r) for r in hit_rates.tolist()]
    return result

def badrate_all(df, y='flagy'):
    """Overall label distribution: count, total and percentage per label value.

    Args:
        df: DataFrame containing the label column.
        y: label column name.

    Returns:
        DataFrame with columns ['flagy', '人数', '总人数', '比率'], one row per
        label value, ordered by frequency (value_counts order, NaN included).
    """
    # Bug fix: the previous code relied on the pandas<2.0 layout of
    # value_counts().reset_index() (counts landed in a column named `y`).
    # Under pandas>=2.0 that column is named 'count', so `df_temp[y]` picked up
    # the label VALUES and the totals/rates were wrong. Build the table explicitly.
    counts = df[y].value_counts(dropna=False)
    df_temp = pd.DataFrame({'flagy': counts.index, '人数': counts.values})
    df_temp['总人数'] = df_temp['人数'].sum()
    df_temp['比率'] = (df_temp['人数'] / df_temp['总人数']).apply(lambda x: '%.2f' % (x * 100) + '%')
    return df_temp

def badrate_month(df, date_var='user_date'):
    """Monthly bad rate: group 'flagy' by year/month of `date_var`.

    Returns a DataFrame indexed by (年份, 月份) with columns 总数 (row count),
    坏客数 (bad count) and 坏客率 (bad rate as a percent string).
    """
    monthly = df[[date_var, 'flagy']].copy()
    monthly[date_var] = pd.to_datetime(monthly[date_var])
    monthly['年份'] = monthly[date_var].dt.year.astype(str)
    monthly['月份'] = monthly[date_var].dt.month.astype(str) + '月'
    out = monthly.groupby(['年份', '月份'])['flagy'].agg([len, sum])
    out['坏客率'] = (out['sum'] / out['len']).apply(lambda r: '%.2f' % (r * 100) + '%')
    out = out.rename(columns={'len': '总数', 'sum': '坏客数'})
    return out

def var_ks(df_data, col_var, flagy):
    """Compute AUC and KS for each candidate variable against the label.

    Args:
        df_data: DataFrame holding the candidate columns and the label.
        col_var: iterable of column names to evaluate.
        flagy: name of the binary label column.

    Returns:
        DataFrame with columns ['var', 'auc', 'ks'], sorted by KS descending.
        Columns sklearn cannot score (NaNs, non-numeric, ...) are skipped.
    """
    # Bug fix: roc_auc_score / roc_curve were never imported, so every iteration
    # raised NameError which the bare `except` silently swallowed — the function
    # always returned an empty frame.
    from sklearn.metrics import roc_auc_score, roc_curve
    rows = []
    for i in col_var:
        try:
            auc = roc_auc_score(y_true=df_data[flagy], y_score=df_data[i])
            fpr, tpr, _ = roc_curve(y_true=df_data[flagy], y_score=df_data[i])
            ks = max(abs(tpr - fpr))
            rows.append([i, auc, ks])
        except Exception:
            # skip columns that cannot be scored
            pass
    result = pd.DataFrame(rows, columns=['var', 'auc', 'ks']).sort_values(by='ks', ascending=False)
    return result
    
def cate_var_transform(X,Y):
    """Encode object-dtype columns as integers ordered by per-category bad rate.

    Categories are sorted by bad rate ascending and assigned codes
    len(categories) .. 1, so a HIGHER bad rate gets a SMALLER code; 0 is reserved
    and converted to NaN (unseen / missing values).

    Args:
        X: feature DataFrame; object columns are encoded, numeric columns pass through.
        Y: binary label aligned with X (1 = bad).

    Returns:
        (X_transformed, object_transfer_rule):
        X_transformed - numeric columns concatenated with the encoded object columns;
        object_transfer_rule - dict {column: DataFrame['raw data','transform data','bad rate']}.

    NOTE(review): the encoded frame is built with a fresh RangeIndex, so if X has a
    non-default index the final concat may misalign rows — confirm callers reset the
    index first.
    """
    ## split columns by dtype
    d_type = X.dtypes
    object_var = X.iloc[:, np.where(d_type == "object")[0]]
    num_var = X.iloc[:, np.where(d_type != "object")[0]]
    
    # object_transfer_rule records the numeric mapping rule of every categorical variable
    object_transfer_rule = {}
    
    # object_transform holds the numeric values after conversion
    object_transform = pd.DataFrame(np.zeros(object_var.shape),
                                    columns=object_var.columns) 
    
    for i in range(0,len(object_var.columns)):
        
        temp_var = object_var.iloc[:, i]
        
        ## distinct values, excluding nulls
        unique_value=np.unique(temp_var.iloc[np.where(~temp_var.isna() )[0]])
    
        transform_rule=pd.concat([pd.DataFrame(unique_value,columns=['raw data']),
                                       pd.DataFrame(np.zeros([len(unique_value),2]),
                                                    columns=['transform data','bad rate'])],axis=1) 
        for j in range(0,len(unique_value)):
            bad_num=len(np.where( (Y == 1) & (temp_var == unique_value[j]) )[0])
            all_num=len(np.where(temp_var == unique_value[j])[0])
            
            # compute the bad probability
            if all_num == 0:# guard against division by zero when all_num = 0
                all_num=0.5  
            transform_rule.iloc[j,2] = 1.0000000*bad_num/all_num
        
        # sort by bad rate, then assign the transformed codes
        transform_rule = transform_rule.sort_values(by='bad rate')
        transform_rule.iloc[:,1]=list(range(len(unique_value),0,-1))
         
        # save the mapping rule
        object_transfer_rule.update({object_var.columns[i]: transform_rule})
        # apply the mapping; any cell still 0 afterwards was unmapped -> NaN below
        for k in range(0,len(unique_value)):
            transfer_value = transform_rule.iloc[np.where(transform_rule.iloc[:,0] == unique_value[k])[0],1]
            object_transform.iloc[np.where(temp_var == unique_value[k])[0],i] = float(transfer_value)
        object_transform.iloc[np.where(object_transform.iloc[:,i] == 0)[0],i] = np.nan 
    
    X_transformed = pd.concat([num_var,object_transform],axis = 1) 
    return(X_transformed,object_transfer_rule)

def psi(train, test, psi_comb):
    """Population Stability Index between train and test for every column.

    Args:
        train: training DataFrame.
        test: test DataFrame (same columns).
        psi_comb: PSI threshold; columns with PSI >= threshold are flagged.

    Returns:
        (keep_psi, df_dropcol_psi, psi_df): columns below the threshold, a report
        frame of the dropped columns ("_woe" suffix stripped), and the per-column
        PSI table.
    """
    psi_scores = {}
    for feature in train.columns:
        p_train = train[feature].value_counts(normalize=True, dropna=False)
        p_test = test[feature].value_counts(normalize=True, dropna=False)
        # categories present in only one of the two frames yield NaN contributions
        value = np.sum((p_train - p_test) * np.log(p_train / p_test))
        psi_scores[feature] = [np.round(value, decimals=4)]
    psi_df = pd.DataFrame(psi_scores, columns=psi_scores.keys()).T
    psi_df.columns = ['psi']

    keep_psi = psi_df.index[psi_df.psi < psi_comb].tolist()
    drop_col = psi_df.index[psi_df.psi >= psi_comb].tolist()

    df_dropcol_psi = pd.DataFrame({'drop_col': [c.replace("_woe", "") for c in drop_col]})
    df_dropcol_psi['DropReason'] = 'psi>%s'%psi_comb
    df_dropcol_psi['Count'] = len(drop_col)
    return keep_psi, df_dropcol_psi, psi_df
 
def lasso_selection(reg_data,reg_target,alpha_ = 0.001,max_iter_=10000,Lasso_CV = True):
    """Select variables via Lasso: keep columns whose fitted coefficient is > 0.

    Args:
        reg_data: feature DataFrame.
        reg_target: target vector aligned with reg_data.
        alpha_: regularization strength (only used when Lasso_CV is False).
        max_iter_: iteration cap (only used when Lasso_CV is False).
        Lasso_CV: when True, let LassoCV choose alpha by cross-validation.

    Returns:
        (selected column names, report DataFrame of dropped columns).

    NOTE(review): the mask keeps only STRICTLY POSITIVE coefficients, so variables
    with negative coefficients are dropped as well — presumably intentional for
    WOE-coded inputs where all signs should be positive; confirm before using on
    raw features.
    """
    if Lasso_CV:
        from sklearn.linear_model import LassoCV
        lassocv = LassoCV()
        lassocv.fit(reg_data, reg_target)
        # alpha selected by cross-validation
        print(f'交叉验证选择的参数alpha：{lassocv.alpha_}')
        # # final coefficients of the Lasso model
        # print(lassocv.coef_[:10])
        # number of variables surviving the Lasso screen
        print(f'筛选出的变量个数：{np.sum(lassocv.coef_ > 0)}')
        mask = lassocv.coef_ > 0
    else:
        # fit Lasso without cross-validation
        from sklearn.linear_model import Lasso
        lasso=Lasso(alpha = alpha_,max_iter = max_iter_)
        lasso.fit(reg_data, reg_target)
        # final coefficients of the Lasso model
        print(f'变量系数：{lasso.coef_[:10]}')
        # number of variables surviving the Lasso screen
        print(f'筛选出的变量个数：{np.sum(lasso.coef_ > 0)}')
        mask = lasso.coef_ > 0
    lasso_sel_var = reg_data.iloc[:, mask].columns.tolist()
    drop_col = list(set(reg_data.columns.tolist())-set(lasso_sel_var))
    
    # report of the dropped variables ("_woe" suffix stripped for readability)
    df_dropcol_lasso = pd.DataFrame([i.replace("_woe", "") for i in drop_col],columns = ['drop_col'])
    df_dropcol_lasso['DropReason'] = 'lasso'
    df_dropcol_lasso['Count'] = len(drop_col)
    return lasso_sel_var,df_dropcol_lasso

def stepwise_selection(X, y, 
                       initial_list = [], 
                       threshold_in = 0.01, 
                       threshold_out = 0.05, 
                       verbose = True):
    """ Perform a forward-backward feature selection 
    based on p-value from statsmodels.api.OLS
    Arguments:
        X - pandas.DataFrame with candidate features
        y - list-like with the target
        initial_list - list of features to start with (column names of X)
        threshold_in - include a feature if its p-value < threshold_in
        threshold_out - exclude a feature if its p-value > threshold_out
        verbose - whether to print the sequence of inclusions and exclusions
    Returns: list of selected features 
    Always set threshold_in < threshold_out to avoid infinite looping.
    See https://en.wikipedia.org/wiki/Stepwise_regression for the details

    NOTE(review): despite the wording above, the models actually fitted here are
    binomial GLMs (logistic regression), not OLS, and a second value is returned:
    a report DataFrame of the columns that were not selected.
    """
    initial_var = X.columns.tolist()
    included = list(initial_list)
    while True:
        changed = False
        # forward step: try each excluded feature, add the one with the lowest p-value
        excluded = list(set(X.columns)-set(included))
        # starts as an all-NaN series; every entry is filled in the loop below
        new_pval = pd.Series(index=excluded)
        for new_column in excluded:
            model = sm.GLM(y, sm.add_constant(pd.DataFrame(X[included+[new_column]])), family = sm.families.Binomial()).fit()
            new_pval[new_column] = model.pvalues[new_column]
        best_pval = new_pval.min()
        if best_pval < threshold_in:
            best_feature = new_pval.index[new_pval.argmin()]
            included.append(best_feature)
            changed=True
            if verbose:
                print('Add  {:30} with p-value {:.6}'.format(best_feature, best_pval))

        # backward step: refit on the included set, drop the worst p-value if too high
        model = sm.GLM(y, sm.add_constant(pd.DataFrame(X[included])), family = sm.families.Binomial()).fit()
        # use all coefs except intercept
        pvalues = model.pvalues.iloc[1:]
        worst_pval = pvalues.max() # null if pvalues is empty
        if worst_pval > threshold_out:
            changed=True
            worst_feature = pvalues.index[pvalues.argmax()]
            included.remove(worst_feature)
            if verbose:
                print('Drop {:30} with p-value {:.6}'.format(worst_feature, worst_pval))
        if not changed:
            break
    drop_col = list(set(initial_var)-set(included))
    
    # report of the variables not selected ("_woe" suffix stripped)
    df_dropcol_stepwise = pd.DataFrame([i.replace("_woe", "") for i in drop_col],columns = ['drop_col'])
    df_dropcol_stepwise['DropReason'] = 'stepwise'
    df_dropcol_stepwise['Count'] = len(drop_col)
    return included,df_dropcol_stepwise

# screen variables by missing rate and concentration
def missing_identify_select(df,y='flagy',missing_trd=0.9,identify_trd=0.95,identify_num=1):
    """Drop variables that are too sparse or too concentrated.

    A column is dropped when its missing rate exceeds `missing_trd`, when it has
    at most `identify_num` distinct non-null values, or when its most frequent
    value covers more than `identify_trd` of the rows. The label column `y` is
    never dropped.

    Returns:
        (remaining column list, report DataFrame of dropped columns with reasons).
    """
    n_rows = len(df)

    # --- missing-rate screen ---
    miss_mask = df.isna().sum() / n_rows > missing_trd
    col_drop_missing = df.columns[miss_mask].tolist()
    dropcol_missing = pd.DataFrame({'drop_col': col_drop_missing})
    dropcol_missing['DropReason'] = 'missingrate>%s' %missing_trd
    dropcol_missing['Count'] = len(col_drop_missing)

    # candidates for the concentration screen: missing-screen survivors minus the label
    candidates = list(set(df.columns) - set(col_drop_missing) - set([y]))

    # --- concentration screen ---
    # (1) at most `identify_num` distinct non-null values
    few_values = df[candidates].nunique() <= identify_num
    drop_few = np.array(candidates)[few_values].tolist()
    # (2) the modal value covers more than `identify_trd` of the rows
    top_share = df[candidates].apply(lambda s: s.value_counts().max() / s.size)
    drop_concentrated = np.array(candidates)[top_share > identify_trd].tolist()

    col_drop_identify = list(set(drop_few + drop_concentrated))

    dropcol_identify = pd.DataFrame({'drop_col': col_drop_identify})
    dropcol_identify['DropReason'] = 'identifyrate>%s or identify_num<=%s' %(identify_trd,identify_num)
    dropcol_identify['Count'] = len(col_drop_identify)

    col_rest = list(set(df.columns) - set(col_drop_missing) - set(col_drop_identify))

    df_drop_miss_iden = pd.concat([dropcol_missing, dropcol_identify], axis=0)

    return col_rest, df_drop_miss_iden

def nan_iden_filter(df_,missing_limit,identical_limit,kp_var = None):
    """Keep columns whose missing rate and top-value concentration are within limits.

    Args:
        df_: input DataFrame.
        missing_limit: maximum allowed missing rate.
        identical_limit: maximum allowed share of the single most frequent value.
        kp_var: columns exempt from the screen; appended back to the result.

    Returns:
        The filtered DataFrame (selected columns, plus kp_var if given).
    """
    original = df_.copy()
    if kp_var:
        df_ = df_.drop(columns = kp_var)

    # per-column missing rate
    na_perc = (df_.apply(lambda s: s[s.isnull()].size / s.size)
               .reset_index(name='missing_rate')
               .rename(columns={'index': 'variable'}))
    # per-column share of the single most frequent value
    identical_perc = (df_.apply(lambda s: s.value_counts().max() / s.size)
                      .reset_index(name='identical_rate')
                      .rename(columns={'index': 'variable'}))

    merged = na_perc.merge(identical_perc, on='variable')
    passed = merged.query('(missing_rate <= {}) & (identical_rate <= {})'.format(missing_limit, identical_limit))
    var_sel = passed['variable'].tolist()

    if kp_var:
        return original[var_sel + kp_var]
    return original[var_sel]

def bin_iv_filter(df1,bins_num = 10,y='flagy',iv_limit = 0.02,kp_var = None):
    """Filter variables by information value computed on equal-frequency bins.

    Args:
        df1: input DataFrame containing the label column `y`.
        bins_num: number of quantile bins (duplicate edges are dropped).
        y: label column name.
        iv_limit: keep variables with IV >= this threshold.
        kp_var: columns exempt from the screen; always kept.

    Returns:
        (columns to keep, report DataFrame of dropped columns).

    NOTE(review): calls a module-level `iv` function that is not defined in this
    chunk (possibly scorecardpy's iv or a sibling helper) — confirm it is in scope.
    Also assumes every non-label column is numeric (pd.qcut fails otherwise).
    """
    df = df1.copy()
    if kp_var:
        df = df.drop(columns = kp_var)
    # equal-frequency binning, then IV screen on the binned variables
    df_bin = df.drop(columns = [y]).apply(lambda x: pd.qcut(x,bins_num,labels = None,duplicates = 'drop'))
    df_merge_y = pd.concat([df_bin,df[y]],axis = 1)
    ivlist = iv(df_merge_y, y=y)
    iv_var = ivlist[ivlist.info_value >= iv_limit].variable.tolist()
    if kp_var:
        # df_final = df1[iv_var + kp_var + [y]]
        remain_col = iv_var + kp_var + [y]
    else:
        # df_final = df1[iv_var + [y]]
        remain_col = iv_var + [y]

    drop_col_bin_iv = list(set(df.columns) - set(remain_col))

    df_dropcol_bin_iv = pd.DataFrame(drop_col_bin_iv,columns = ['drop_col'])
    df_dropcol_bin_iv['DropReason'] = 'bin_iv<%s'%iv_limit
    df_dropcol_bin_iv['Count'] = len(drop_col_bin_iv)

    return remain_col,df_dropcol_bin_iv

def woe_iv_filter(df1, y='flagy', iv_limit=0.02, kp_var=None):
    """Filter variables by WOE-based information value (scorecardpy.iv).

    Args:
        df1: DataFrame containing the label column `y`.
        y: label column name.
        iv_limit: keep variables with IV >= this threshold.
        kp_var: columns exempt from the screen; always kept.

    Returns:
        (columns to keep, report DataFrame of dropped columns).
    """
    df = df1.copy()
    if kp_var:
        df = df.drop(columns=kp_var)
    # Bug fix: the label was hard-coded as 'flagy' here, silently ignoring the
    # `y` parameter for any other label name.
    df_woe_iv = sc.iv(df, y)
    iv_var = df_woe_iv[df_woe_iv.info_value >= iv_limit].variable.tolist()
    if kp_var:
        remain_col = iv_var + kp_var + [y]
    else:
        remain_col = iv_var + [y]

    drop_col_woe_iv = list(set(df.columns) - set(remain_col))

    # report of the dropped variables ("_woe" suffix stripped for readability)
    df_dropcol_woe_iv = pd.DataFrame([i.replace("_woe", "") for i in drop_col_woe_iv], columns=['drop_col'])
    df_dropcol_woe_iv['DropReason'] = 'woe_iv<%s' % iv_limit
    df_dropcol_woe_iv['Count'] = len(drop_col_woe_iv)

    return remain_col, df_dropcol_woe_iv

def object_var_del(df, num_limit=10):
    """Drop object-dtype columns that have more than `num_limit` distinct values."""
    category_counts = df.select_dtypes(include='object').apply(lambda s: s.value_counts().shape[0])
    over_limit = category_counts[category_counts > num_limit].index.tolist()
    return df.drop(columns=over_limit)

def isna(data):
    """Return the Index of columns that contain at least one missing value.

    NOTE: the name shadows pandas' own `isna` inside this module.
    """
    na_counts = pd.isna(data).sum()
    return na_counts.index[na_counts.values > 0]

def corr_iv(df1,corr_limit = 0.7,flag = "flagy"):
    """Among highly correlated variable pairs, drop the member with the lower IV.

    Args:
        df1: DataFrame containing the label column `flag`.
        corr_limit: absolute-correlation threshold for a pair to be flagged.
        flag: label column name.

    Returns:
        (reduced DataFrame, report DataFrame of dropped columns).

    NOTE(review): calls a module-level `iv` function not defined in this chunk
    (possibly scorecardpy's iv or a sibling helper) — confirm it is in scope.
    """
    df = df1.copy()
    corr = abs(df.drop(columns=[flag]).corr())
    corr_dict={}
    # collect every upper-triangle pair whose |corr| exceeds the threshold
    for i in range(corr.shape[0]-1):
        for j in range(corr.shape[0]-i-1):
            if corr.iloc[i,i+j+1] > corr_limit:
                corr_dict[(corr.index[i],corr.columns[i+j+1])] = corr.iloc[i,i+j+1]  
    iv_result = iv(df, flag)
    # for each flagged pair still present, keep the higher-IV member
    for name in corr_dict.keys():
        if name[0] in df.columns and name[1] in df.columns:
            name0_iv = float(iv_result[iv_result['variable'] == name[0]]['info_value'])
            name1_iv = float(iv_result[iv_result['variable'] == name[1]]['info_value'])
            if name0_iv > name1_iv:
                del df[name[1]]
            else:
                del df[name[0]]
    drop_col_corr = list(set(df1.drop(columns=[flag]).columns)-set(df.columns))
    df_drop_corr = pd.DataFrame([i.replace("_woe", "") for i in drop_col_corr],columns = ['drop_col'])
    df_drop_corr['DropReason'] = '相关性>%s'%corr_limit
    df_drop_corr['Count'] = len(drop_col_corr)
    print("相关性剔除变量：{}".format(drop_col_corr))

    return df,df_drop_corr

def var_corr(df, seuil=0.8):
    """Iteratively drop the variable involved in the most |corr| > seuil pairs.

    Repeats until no variable is correlated above the threshold with any other.
    Returns (remaining column list, report DataFrame of dropped columns).
    """
    def _corr_counts(frame):
        # per variable: how many variables (including itself) it is correlated
        # with beyond the threshold, sorted descending
        flags = frame.corr().applymap(lambda v: abs(v) > seuil)
        return pd.DataFrame(flags.sum().sort_values(ascending=False)).rename(columns={0: 'cnt'}).reset_index()

    dropped = []
    counts = _corr_counts(df)
    while True:
        offenders = counts.loc[counts.cnt > 1, 'index'].tolist()
        if not offenders:
            break
        dropped.append(offenders[0])
        print(dropped)
        counts = _corr_counts(df.drop(dropped, axis=1))

    col_rest_corr = list(set(df.columns) - set(dropped))

    df_drop_corr = pd.DataFrame(dropped, columns=['drop_col'])
    df_drop_corr['DropReason'] = '相关性>%s'%seuil
    df_drop_corr['Count'] = len(dropped)
    print("相关性剔除变量：{}".format(dropped))

    return col_rest_corr, df_drop_corr

def vif_select(df1, vif_limit=3, flag="flagy"):
    """Iteratively drop the variable with the highest VIF until all VIFs <= vif_limit.

    Args:
        df1: DataFrame containing the label column `flag` (excluded from the VIF calc).
        vif_limit: maximum acceptable variance inflation factor.
        flag: label column name.

    Returns:
        (final VIF table, reduced DataFrame, report DataFrame of dropped columns).
    """
    import statsmodels.stats.outliers_influence as oi
    import statsmodels.api as sm
    # Bug fix: np.float was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin float is the documented replacement.
    xs = np.array(sm.add_constant(df1.drop(columns=[flag])),
                  dtype=float)
    xs_name = ["const"] + df1.drop(
        columns=[flag]).columns.to_list()  # variables whose VIF is computed
    vif = pd.DataFrame([{
        "variable": xs_name[i],
        "vif": oi.variance_inflation_factor(xs, i)
    } for i in range(len(xs_name))])
    vif = vif[vif.variable != 'const']
    train_woe_copy = df1.copy()
    while 1:
        vif.sort_values(by='vif', ascending=False, inplace=True)
        if vif.iloc[0]['vif'] > vif_limit:
            vif.reset_index(drop=1, inplace=True)
            print('drop: ' + vif.iloc[0]['variable'] + ' vif: ' +
                  str(vif.iloc[0]['vif']))
            # remove the worst offender and recompute all VIFs
            del train_woe_copy[vif.iloc[0]['variable']]
            vif.drop(index=[0], inplace=True)
            xs = np.array(sm.add_constant(train_woe_copy.drop(columns=[flag])),
                          dtype=float)
            xs_name = ["const"] + vif['variable'].values.tolist()  # variables whose VIF is computed
            vif = pd.DataFrame([{
                "variable": xs_name[i],
                "vif": oi.variance_inflation_factor(xs, i)
            } for i in range(len(xs_name))])
            vif = vif[vif.variable != 'const']
        else:
            break
    drop_col_vif = list(set(df1.drop(columns=[flag]).columns) - set(vif.variable))
    df_drop_vif = pd.DataFrame([i.replace("_woe", "") for i in drop_col_vif], columns=['drop_col'])
    df_drop_vif['DropReason'] = 'vif>%s' % vif_limit
    df_drop_vif['Count'] = len(drop_col_vif)

    return vif, train_woe_copy, df_drop_vif

def pvalue_params_select(df,pvalue_limit = 0.05,flag = "flagy"):
    """Backward-eliminate WOE variables by GLM p-value, then by coefficient sign.

    Stage 1 repeatedly refits a binomial GLM and drops the variable with the
    largest p-value while it exceeds `pvalue_limit`. Stage 2 then drops
    negative-coefficient variables one at a time (WOE-coded inputs are expected
    to carry positive signs).

    Args:
        df: DataFrame with the label column `flag` and WOE-coded features.
        pvalue_limit: p-value threshold for stage 1.
        flag: label column name.

    Returns:
        (reduced DataFrame, report DataFrame of dropped columns).

    NOTE(review): the printed names strip the last 4 characters (assumes a
    "_woe" suffix) — confirm all feature columns follow that convention.
    """
    df1 = df.copy()
    # stage 1: drop the least significant variable until all p-values pass
    while 1:
        model = sm.GLM(df1[flag], sm.add_constant(df1.drop(columns=[flag])), family = sm.families.Binomial()).fit()
        pvalue = model.pvalues
        pvalue.drop(index=['const'], inplace=True)
        pvalue.sort_values(inplace=True, ascending=False)
        if pvalue.iloc[0] > pvalue_limit:
            print('drop '+ pvalue.index[0][:-4] + ' P-value: ' + str(pvalue.iloc[0]))
            del df1[pvalue.index[0]]
        else:
            break

    # stage 2: drop negative-coefficient variables one at a time
    while 1:
        model = sm.GLM(df1[flag], sm.add_constant(df1.drop(columns=[flag])), family = sm.families.Binomial()).fit()
        params = model.params
        params.drop(index=['const'], inplace=True)
        params.sort_values(inplace=True, ascending=True)
        if params.iloc[0] < 0:
            print('drop ' + params.index[0][:-4] + ' Params: ' + str(params.iloc[0]))
            del df1[params.index[0]]
        else:
            break
    drop_col_pvalue_params = list(set(df.drop(columns=[flag]).columns)-set(df1.drop(columns=[flag]).columns))
    df_drop_pvalue_params = pd.DataFrame([i.replace("_woe", "") for i in drop_col_pvalue_params],columns = ['drop_col'])
    df_drop_pvalue_params['DropReason'] = 'lr'#'p-value>%s'%pvalue_limit
    df_drop_pvalue_params['Count'] = len(drop_col_pvalue_params)

    return df1,df_drop_pvalue_params

def score_high(dt_grey,card,score_range=(300, 1000),tick=50,if_contain_grey = True,grey_value=0.5):
    """
    Equal-width score-band report for a scorecard.

    param dt_grey: sample (may include grey customers), must contain 'flagy'
    param card: the scorecard (passed to sdf_scorecard_ply)
    param score_range: score interval (low, high)
    param tick: band width
    param if_contain_grey / grey_value: whether 'flagy' holds a grey label, and its value

    Returns a DataFrame with per-band counts, pass rate and default-rate statistics.

    NOTE(review): relies on `sdf_scorecard_ply`, which is not defined in this
    chunk — confirm it is in scope.
    """
    score = sdf_scorecard_ply(dt_grey, card)
    # clamp scores into [low, high-1] so every row falls in a left-closed band
    score[score <= score_range[0]] = score_range[0]
    score[score >= score_range[1]] = score_range[1] - 1
    score_total = pd.merge(score,dt_grey['flagy'],left_index = True,right_index = True)

    score_total["score"] = pd.cut(
            score_total.score,
            bins=range(score_range[0], score_range[1] + 1, tick),
            right=False,
        )
    score_total = (
        score_total.groupby(by=["score", "flagy"])
        .size()
        .unstack(level=[1], fill_value=0)
    )
    score_total.sort_index(ascending=False, inplace=True)
    score_total["好"] = score_total[0]
    score_total["坏"] = score_total[1]
    if if_contain_grey:
        score_total["灰"] = score_total[grey_value]
        score_total["总"] = score_total[0] + score_total[1] + score_total[grey_value]
    else:
        score_total["总"] = score_total[0] + score_total[1]
    # cumulative statistics run from high score to low (index sorted descending)
    score_total["坏累计"] = score_total[1].cumsum()
    score_total["总累计"] = score_total["总"].cumsum()
    score_total["通过率"] = score_total["总累计"] / score_total["总"].sum()
    score_total["每段违约率"] = score_total["坏"] / score_total["总"]
    score_total["平均违约率"] = score_total["坏"].sum() / score_total["总"].sum()
    score_total["通过违约率"] = score_total["坏累计"] / score_total["总累计"]
    score_total["拒绝违约率"] = (score_total["坏"].sum() - score_total["坏累计"]) / (
        score_total["总"].sum() - score_total["总累计"]
    )
    score_total["违约率下降"] = 1 - score_total["通过违约率"] / score_total["平均违约率"]
    score_total.reset_index(inplace=True)
    score_total.rename(columns={"score": "评分区间"}, inplace=True)
    if if_contain_grey:
        del score_total[1], score_total[0],score_total[grey_value]
    else:
        del score_total[1], score_total[0]
    return score_total

def score_freq(dt_grey, card, score_range=(300, 1000), percent=5, if_contain_grey=True, grey_value=0.5):
    """Equal-frequency score-band report for a scorecard.

    Args:
        dt_grey: sample (may include grey customers), must contain 'flagy'.
        card: the scorecard (passed to sdf_scorecard_ply).
        score_range: (low, high) score interval.
        percent: percentile step for the equal-frequency bands.
        if_contain_grey / grey_value: whether 'flagy' holds a grey label, and its value.

    Returns:
        DataFrame with per-band counts, pass rate and default-rate statistics.

    NOTE(review): relies on `sdf_scorecard_ply`, which is not defined in this
    chunk — confirm it is in scope.
    """
    score = sdf_scorecard_ply(dt_grey, card)
    # clamp scores into [low, high-1] so every row falls in a left-closed band
    score[score <= score_range[0]] = score_range[0]
    score[score >= score_range[1]] = score_range[1] - 1
    score_total = pd.merge(score, dt_grey['flagy'], left_index=True, right_index=True)

    score_total.loc[score_total.score < score_range[0], "score"] = score_range[0]
    score_total.loc[score_total.score >= score_range[1], "score"] = score_range[1]
    percent_list = list(range(0, 100 + percent, percent))
    breaks = np.percentile(score_total.score.values, percent_list)
    # Bug fixes: (1) the outer break points were hard-coded to 300/1000 instead of
    # score_range, so non-default ranges binned incorrectly (score_freq1 already
    # used score_range); (2) dedupe AFTER the int() cast so truncation cannot
    # create duplicate bin edges for pd.cut.
    breaks = sorted(set(int(i) for i in breaks))
    breaks[-1] = score_range[1]
    breaks[0] = score_range[0]
    score_total["score"] = pd.cut(score_total.score, bins=breaks, right=False)
    score_total = (
        score_total.groupby(by=["score", "flagy"])
        .size()
        .unstack(level=[1], fill_value=0)
    )
    score_total = score_total.fillna(0)
    score_total.sort_index(ascending=False, inplace=True)
    score_total["好"] = score_total[0]
    score_total["坏"] = score_total[1]
    if if_contain_grey:
        score_total["灰"] = score_total[grey_value]
        score_total["总"] = score_total[0] + score_total[1] + score_total[grey_value]
    else:
        score_total["总"] = score_total[0] + score_total[1]
    # cumulative statistics run from high score to low (index sorted descending)
    score_total["坏累计"] = score_total[1].cumsum()
    score_total["总累计"] = score_total["总"].cumsum()
    score_total["通过率"] = score_total["总累计"] / score_total["总"].sum()
    score_total["每段违约率"] = score_total["坏"] / score_total["总"]
    score_total["平均违约率"] = score_total["坏"].sum() / score_total["总"].sum()
    score_total["通过违约率"] = score_total["坏累计"] / score_total["总累计"]
    score_total["拒绝违约率"] = (score_total["坏"].sum() - score_total["坏累计"]) / (
        score_total["总"].sum() - score_total["总累计"]
    )
    score_total["违约率下降"] = 1 - score_total["通过违约率"] / score_total["平均违约率"]
    score_total.reset_index(inplace=True)
    score_total.rename(columns={"score": "评分区间"}, inplace=True)
    if if_contain_grey:
        del score_total[1], score_total[0], score_total[grey_value]
    else:
        del score_total[1], score_total[0]
    return score_total

def score_high1(dt_grey, score_var="score", score_range=(300, 1000), tick=50, if_contain_grey=True, grey_value=0.5):
    """Equal-width score-band report on a pre-scored sample.

    Args:
        dt_grey: DataFrame with the score column `score_var` and 'flagy'.
        score_var: name of the score column.
        score_range: (low, high) score interval.
        tick: band width.
        if_contain_grey / grey_value: whether 'flagy' holds a grey label, and its value.

    Returns:
        DataFrame with per-band counts, pass rate and default-rate statistics.

    Bug fixes vs the original:
    - scores >= the upper bound are clamped to high-1, so they land inside the
      last left-closed band instead of being silently dropped by pd.cut
      (matches score_high);
    - the score column is referenced via `score_var` rather than the hard-coded
      `.score` attribute, so non-default column names work.
    """
    score_total = dt_grey[[score_var, 'flagy']].copy()  # copy: don't mutate the caller's frame
    score_total.loc[score_total[score_var] <= score_range[0], score_var] = score_range[0]
    # clamp to high-1: pd.cut(..., right=False) excludes the right edge
    score_total.loc[score_total[score_var] >= score_range[1], score_var] = score_range[1] - 1

    score_total["score"] = pd.cut(
        score_total[score_var],
        bins=range(score_range[0], score_range[1] + 1, tick),
        right=False,
    )
    score_total = (score_total.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0))
    score_total.sort_index(ascending=False, inplace=True)
    score_total["好"] = score_total[0]
    score_total["坏"] = score_total[1]
    if if_contain_grey:
        score_total["灰"] = score_total[grey_value]
        score_total["总"] = score_total[0] + score_total[1] + score_total[grey_value]
    else:
        score_total["总"] = score_total[0] + score_total[1]
    # cumulative statistics run from high score to low (index sorted descending)
    score_total["坏累计"] = score_total[1].cumsum()
    score_total["总累计"] = score_total["总"].cumsum()
    score_total["通过率"] = score_total["总累计"] / score_total["总"].sum()
    score_total["每段违约率"] = score_total["坏"] / score_total["总"]
    score_total["平均违约率"] = score_total["坏"].sum() / score_total["总"].sum()
    score_total["通过违约率"] = score_total["坏累计"] / score_total["总累计"]
    score_total["拒绝违约率"] = (score_total["坏"].sum() - score_total["坏累计"]) / (score_total["总"].sum() - score_total["总累计"])
    score_total["违约率下降"] = 1 - score_total["通过违约率"] / score_total["平均违约率"]
    score_total.reset_index(inplace=True)
    score_total.rename(columns={"score": "评分区间"}, inplace=True)
    if if_contain_grey:
        del score_total[1], score_total[0], score_total[grey_value]
    else:
        del score_total[1], score_total[0]
    return score_total

def score_freq1(dt_grey, score_var="score", score_range=(300, 1000), percent=5, if_contain_grey=True, grey_value=0.5):
    """Equal-frequency score-band report on a pre-scored sample.

    Args:
        dt_grey: DataFrame with the score column `score_var` and 'flagy'.
        score_var: name of the score column.
        score_range: (low, high) score interval.
        percent: percentile step for the equal-frequency bands.
        if_contain_grey / grey_value: whether 'flagy' holds a grey label, and its value.

    Returns:
        DataFrame with per-band counts, pass rate and default-rate statistics.

    Bug fixes vs the original:
    - the score column is referenced via `score_var` rather than the hard-coded
      `.score` attribute, so non-default column names work;
    - break points are deduplicated AFTER the int() cast, so truncation cannot
      create duplicate bin edges for pd.cut.
    """
    score_total = dt_grey[[score_var, 'flagy']].copy()  # copy: don't mutate the caller's frame
    score_total.loc[score_total[score_var] <= score_range[0], score_var] = score_range[0]
    # clamp to high-1: pd.cut(..., right=False) excludes the right edge
    score_total.loc[score_total[score_var] >= score_range[1], score_var] = score_range[1] - 1

    percent_list = list(range(0, 100 + percent, percent))
    breaks = np.percentile(score_total[score_var].values, percent_list)
    breaks = sorted(set(int(b) for b in breaks))
    # pin the outer edges to the requested range
    breaks[-1] = score_range[1]
    breaks[0] = score_range[0]
    score_total["score"] = pd.cut(score_total[score_var], bins=breaks, right=False)
    score_total = (score_total.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0))
    score_total = score_total.fillna(0)
    score_total.sort_index(ascending=False, inplace=True)
    score_total["好"] = score_total[0]
    score_total["坏"] = score_total[1]
    if if_contain_grey:
        score_total["灰"] = score_total[grey_value]
        score_total["总"] = score_total[0] + score_total[1] + score_total[grey_value]
    else:
        score_total["总"] = score_total[0] + score_total[1]
    # cumulative statistics run from high score to low (index sorted descending)
    score_total["坏累计"] = score_total[1].cumsum()
    score_total["总累计"] = score_total["总"].cumsum()
    score_total["通过率"] = score_total["总累计"] / score_total["总"].sum()
    score_total["每段违约率"] = score_total["坏"] / score_total["总"]
    score_total["平均违约率"] = score_total["坏"].sum() / score_total["总"].sum()
    score_total["通过违约率"] = score_total["坏累计"] / score_total["总累计"]
    score_total["拒绝违约率"] = (score_total["坏"].sum() - score_total["坏累计"]) / (score_total["总"].sum() - score_total["总累计"])
    score_total["违约率下降"] = 1 - score_total["通过违约率"] / score_total["平均违约率"]
    score_total.reset_index(inplace=True)
    score_total.rename(columns={"score": "评分区间"}, inplace=True)
    if if_contain_grey:
        del score_total[1], score_total[0], score_total[grey_value]
    else:
        del score_total[1], score_total[0]
    return score_total

def myWOEbin(
    Data,
    Y,
    max_leaf_num=6, # maximum number of bins
    min_woe_box_percent=0.05,# minimum leaf sample share (of non-null rows only)
    min_woe_box_num_min=100,# minimum leaf sample count; both the share and this floor must hold
    special_values=None,
    method = 'tree',):
    """Search split points per column via decision tree ('tree') or percentiles ('freq').

    Args:
        Data: DataFrame of candidate (numeric) variables.
        Y: label aligned with Data.
        max_leaf_num: maximum number of bins / tree leaves.
        min_woe_box_percent: minimum leaf sample share among non-null values.
        min_woe_box_num_min: floor on the minimum leaf sample count.
        special_values: values excluded (together with NaN) from the split search.
        method: 'tree' for entropy decision-tree splits, 'freq' for equal-frequency splits.

    Returns:
        pd.Series mapping each column name to its sorted list of split points.
    """
    try:
        Data = Data.reset_index(drop=1)
        Y = Y.reset_index(drop=1)
    except Exception as e:
        print("输入数据非数据框格式，请调整为数据框！")

    # each bin must hold at least min_woe_box_percent of the sample (adjustable)
    min_woe_box_num = int(Data.shape[0] * min_woe_box_percent)
    if min_woe_box_num < min_woe_box_num_min:
        min_woe_box_num = min_woe_box_num_min

    var_num = len(Data.columns)

    # var_splitpoint stores the split points of every variable
    var_splitpoint = list(np.zeros([var_num, 1]))
    if special_values:
        special_values = list(special_values)
    else:
        special_values = []
    for i in range(0, var_num):
        # only non-null, non-special values take part in the optimal binning
        temp_var = Data.iloc[:, i]
        NonNan_position = np.where(
            ~(temp_var.isna()) & ~(temp_var.isin(special_values))
        )[0]
        max_leaf = max_leaf_num
        # at most max_leaf leaf nodes
        if method == 'tree':
            groupdt = tree.DecisionTreeClassifier(
                criterion="entropy",
                min_samples_leaf=min_woe_box_num,
                max_leaf_nodes=max_leaf,
            )

            groupdt.fit(
                np.array(Data.iloc[NonNan_position, i]).reshape(-1, 1),
                Y.iloc[NonNan_position],
            )
            # parse the split thresholds out of the exported graphviz text
            dot_data = tree.export_graphviz(
                groupdt,
                out_file=None,
            )
            pattern = re.compile("<= (.*?)\\\\nentropy", re.S)
            split_num = re.findall(pattern, dot_data)
        elif method == 'freq':
            split_num = np.percentile(Data.iloc[NonNan_position, i], np.linspace(0,100, max_leaf, endpoint=False), 
                                     interpolation='linear')
            split_num = split_num[1:]

        splitpoint = [float(j) for j in split_num]
        final_splitpoint = sorted(list(set(splitpoint)))
        try:
            # drop the first split if it does not actually separate off the minimum value
            if Data.iloc[NonNan_position, i].min() >= final_splitpoint[0]:
                final_splitpoint = final_splitpoint[1:]
        except:
            pass
        var_splitpoint[i] = final_splitpoint

    var_splitpoint = pd.Series(var_splitpoint, index=Data.columns)
    return var_splitpoint

def single_woe_cal(data, y, SingleBreak, special_values=None):
    """Compute the WOE/IV binning table for a single variable.

    Args:
        data: df with 2 columns -- the single variable and the label.
        y: column name of the label.
        SingleBreak: interior break points (extended here with +/- inf).
        special_values: values binned individually, together with NaN.

    Returns:
        dict {variable_name: binning DataFrame} with count / badprob / woe / iv
        per bin; +/-inf WOE values are capped at +/-9999.

    Raises:
        Exception: when `y` is missing from `data` or more than one explanatory
        variable is passed.
    """
    print("data变量: {}".format(data.columns), data.shape)
    SingleBreak = sorted(list(set([-np.inf] + SingleBreak + [np.inf])))
    if y not in data.columns:
        raise Exception("label not found in data:", y)
    v = [i for i in data.columns.to_list() if i != y]
    if len(v) != 1:
        raise Exception("Input array must contain only one explanatory variable")
    # basic info
    total_num = data.shape[0]
    total_bad = data[y].sum()
    total_good = total_num - total_bad
    # CUT and group
    if not isinstance(special_values, list):
        special_values = [special_values]
    # split the rows into special/missing values vs normal values
    data1 = data[data[v[0]].isin(list(special_values) + [np.nan, None])]
    data = data[~data[v[0]].isin(list(special_values) + [np.nan, None])]

    tempdata = data.assign(bin=pd.cut(data[v[0]], SingleBreak, right=False))

    temptable = (
        tempdata.groupby("bin")
        .agg(["count", np.mean, np.sum])[y]
        .rename(columns={"count": "count", "mean": "badprob", "sum": "bad"})
    )

    # complete the bin description
    temptable["variable"] = v[0]
    temptable["count_distr"] = temptable["count"] / total_num
    temptable["good"] = temptable["count"] - temptable["bad"]
    temptable["breaks"] = SingleBreak[1:]
    temptable["bad_pct"] = temptable["bad"] / total_bad
    temptable["good_pct"] = (temptable["count"] - temptable["bad"]) / total_good
    temptable["woe"] = np.log(temptable["bad_pct"] / temptable["good_pct"])
    # cap infinite WOE (empty good or bad side) at +/-9999
    temptable['woe'] = temptable['woe'].replace(np.inf, 9999)
    temptable['woe'] = temptable['woe'].replace(-np.inf, -9999)
    temptable["bin_iv"] = (temptable["bad_pct"] - temptable["good_pct"]) * temptable[
        "woe"
    ]
    # binning statistics for the special values
    #     data1[v[0]] = data1[v[0]].astype(str)
    data1 = data1.fillna("nan")
    data1 = data1.rename(columns={v[0]: "bin"})
    temptable1 = (
        data1.groupby("bin")
        .agg(["count", np.mean, np.sum])[y]
        .rename(columns={"count": "count", "mean": "badprob", "sum": "bad"})
    )
    temptable1["variable"] = v[0]
    temptable1["count_distr"] = temptable1["count"] / total_num
    temptable1["good"] = temptable1["count"] - temptable1["bad"]
    temptable1["breaks"] = temptable1.index
    temptable1["bad_pct"] = temptable1["bad"] / total_bad
    temptable1["good_pct"] = (temptable1["count"] - temptable1["bad"]) / total_good
    temptable1["woe"] = np.log(temptable1["bad_pct"] / temptable1["good_pct"])
    temptable1['woe'] = temptable1['woe'].replace(np.inf, 9999)
    temptable1['woe'] = temptable1['woe'].replace(-np.inf, -9999)
    temptable1["bin_iv"] = (
        temptable1["bad_pct"] - temptable1["good_pct"]
    ) * temptable1["woe"]

    # special-value bins first, then the regular interval bins
    single_bin = pd.concat([temptable1, temptable], axis=0, sort=False)
    single_bin["total_iv"] = single_bin["bin_iv"].sum()
    single_bin.reset_index(inplace=True)
    single_bin = {
        v[0]: single_bin[
            [
                "variable",
                "bin",
                "count",
                "count_distr",
                "good",
                "bad",
                "badprob",
                "woe",
                "bin_iv",
                "total_iv",
                "breaks",
            ]
        ]
    }
    return single_bin

def sdf_woebin(
    data,
    y,
    breaks_list=None,
    max_leaf_num=6,
    min_woe_box_percent=0.01,
    min_woe_box_num_min=100,
    special_values=None,
    method = 'tree'):
    # Pandas Version
    """Auto-bin every feature and compute per-bin WOE/IV tables.

    :param data: DataFrame, the original dataset (features + label)
    :param y: str, name of the label column
    :param breaks_list: dict or None, user-supplied break points per variable,
        e.g. {'a': [-inf, 1, 5, inf]}; overrides the auto-generated breaks
    :param max_leaf_num: max number of leaves for the tree-based binning
    :param min_woe_box_percent: minimum sample share per bin
    :param min_woe_box_num_min: minimum sample count per bin
    :param special_values: value or list of values binned separately
    :param method: binning method passed through to myWOEbin (default 'tree')
    :return: dict {variable: binning DataFrame with woe/iv columns}
    :raises Exception: when `y` is not a column of `data`
    """
    # label check
    if y not in data.columns:
        raise Exception("label not found in data:", y)
    # 决策树找分割点 (decision tree proposes the split points)
    breaks = myWOEbin(
        data.drop(columns=y),
        data[y],
        max_leaf_num=max_leaf_num,
        min_woe_box_percent=min_woe_box_percent,
        min_woe_box_num_min=min_woe_box_num_min,
        special_values=special_values,
        method = method
    )
    breaks = breaks.to_dict()
    # special values must not be forwarded as ordinary break points
    if breaks_list:
        if isinstance(special_values, list):
            drop_values = set(special_values) | set(["nan"])
        else:
            drop_values = set([special_values]) | set(["nan"])
        # sorted(): set subtraction destroys ordering, and the downstream
        # binning (pd.cut-style) requires monotonically increasing breaks
        breaks_list = dict(
            [(k, sorted(set(v) - drop_values)) for k, v in breaks_list.items()]
        )
        breaks.update(breaks_list)
    # parallel process--calculating woe and iv, reforming
    parallel_res = Parallel(n_jobs=-1)(
        delayed(single_woe_cal)(
            data[[y, v]], y, breaks[v], special_values=special_values
        )
        for v in list(breaks)
    )
    # reforming the result: from list of single-variable dicts to one dict
    bins_dict = {}
    for b in parallel_res:
        bins_dict.update(b)
    return bins_dict

def _single_transform(series, trans_dict):
    series.rename(series.name + "_woe", inplace=True)
    normal_dict = {
        i: j for i, j in trans_dict.items() if type(i) == pd._libs.interval.Interval
    }
    special_dict = {
        i: j for i, j in trans_dict.items() if type(i) != pd._libs.interval.Interval
    }
    series1 = series[series.isin(list(special_dict) + [np.nan])]
    series2 = series[~series.isin(list(special_dict) + [np.nan])]
    #     try:
    #         special_dict.update(dict([(i, trans_dict[i]) for i in special_values]))
    #     except:
    #         print("Warning: No special_values get")
    series1 = series1.fillna("nan")
    result = pd.concat([series1.map(special_dict), series2.map(normal_dict)], axis=0)
    return result

def sdf_woebin_ply(data, bins, n_jobs=-1):
    """Transform raw feature columns into WOE-encoded columns.

    :param data: DataFrame, untransformed data
    :param bins: dict {variable: binning DataFrame} as returned by sdf_woebin;
        each table's 'bin'/'woe' columns define the mapping
    :param n_jobs: number of CPUs for joblib (default -1 = all cores)
    :return: DataFrame with each binned column renamed '<var>_woe', plus any
        columns of `data` without a binning table, passed through unchanged
    :raises Exception: when a binned variable is missing from `data`
    """
    # build {variable: {bin: woe}} lookup tables
    Woe_dict = dict(
        [
            (i, dict(j[["bin", "woe"]].to_dict(orient="split")["data"]))
            for i, j in bins.items()
        ]
    )
    keys = list(Woe_dict)
    DiffElement = list(set(keys) - set(data.columns))
    if len(DiffElement) != 0:
        raise Exception("These variables aren't included in data:", DiffElement)
    # columns without a binning table are kept as-is in the output
    Untransformed = list(set(data.columns) - set(keys))
    data_untrans = data[Untransformed]
    if len(Untransformed) != 0:
        if len(Untransformed) >= 5:
            print(
                "These variables will not be transformed:",
                Untransformed[:5],
                "list goes on",
            )
        else:
            # full list is shown, so no "list goes on" suffix here
            print("These variables will not be transformed:", Untransformed)
    parallel_res = Parallel(n_jobs=n_jobs)(
        delayed(_single_transform)(data[v], Woe_dict[v]) for v in keys
    )
    data_transformed = pd.concat(parallel_res + [data_untrans], axis=1)
    return data_transformed

def sdf_woebin_plot(bins):
    """Plot each variable's binning result: stacked good/bad share bars on the
    left axis and the per-bin bad rate line on the right axis.

    :param bins: dict {variable: binning DataFrame} (columns used: 'bin',
        'count', 'good', 'bad', 'count_distr', 'badprob', 'total_iv')
    :return: dict {variable: primary (left) matplotlib Axes of its figure}
    """
    plt.rcParams['font.sans-serif']=['SimHei'] # render Chinese labels correctly
    plt.rcParams['axes.unicode_minus']=False # render the minus sign correctly
    figure = {}
    for col in bins:
        f, ax1 = plt.subplots(figsize=(8, 6))
        ax1.set_ylabel("占比")
        ax1.set_title(col + ", iv={}".format(round(bins[col].iloc[0]["total_iv"], 6)))
        plt.xticks(list(range(len(bins[col]))), bins[col]["bin"], rotation=45, fontsize=8)        
        # lower bar segment: goods' share within the bin, scaled by the bin's
        # overall sample share
        ax1.bar(
            list(range(len(bins[col]))),
            bins[col]["count_distr"] * bins[col]["good"] / bins[col]["count"],
            label="占比",
            color=(24/254,192/254,196/254),)
        # stacked on top: bads' share within the bin
        ax1.bar(
            list(range(len(bins[col]))),
            bins[col]["count_distr"] * bins[col]["bad"] / bins[col]["count"],
            bottom=bins[col]["count_distr"] * bins[col]["good"] / bins[col]["count"],
            color=(246/254,115/254,109/254),)
        # annotate each bar with its sample share (%) and raw count
        for a, b in zip(range(len(bins[col])), bins[col]["count_distr"]):
            c = str(round(b * 100, 2)) + "%"
            plt.text(a, b * 1.02, c + ' '+ str(bins[col].loc[a, 'count']), ha="center", va="baseline", fontsize=10)
        plt.rcParams["axes.grid"] = False
        
        # secondary axis: bad-rate line across bins
        ax2 = ax1.twinx()
        ax2.set_ylabel("坏客率")
        ax2.plot(
            range(len(bins[col])),
            bins[col]["badprob"],
            label="坏客率",
            marker="o",
            markerfacecolor="white",
            markersize=8,
            color="blue",)  
        # annotate each point of the bad-rate line
        for a, b in zip(range(len(bins[col])), bins[col]["badprob"]):
            c = str(round(b * 100, 2)) + "%"
            ax2.text(a, b, c, ha="right", va="top", fontsize=10, color="blue")
        plt.xticks(
            list(range(len(bins[col]))), bins[col]["bin"], rotation=45, fontsize=8)
        ax1.legend(loc="upper right", bbox_to_anchor=(1, 0.9))
        ax2.legend(loc=1)
        figure.update({col: ax1})
    return figure

def mannual_breaks(pr_bins, data, ylabel="flagy", special_values=None):
    """Interactive loop to review and adjust break points variable by variable.

    INPUT:
        pr_bins: bins generated through auto-binning process
        data: data in original form
        ylabel: name of ylabel, in str form
        special_values: value or list of values binned separately
    OUTPUT:
        all_breaklist: dict {variable: accepted break points}
        droplist: variables that cannot be binned properly

    Console options: 1 next / 2 adjust / 3 back / 4 drop / 5 quit.
    """

    import matplotlib.pyplot as plt

    xname = list(pr_bins)
    i = 0
    all_breaklist = {}
    droplist = []
    while i < (len(pr_bins)):
        b = xname[i]
        p1 = sdf_woebin_plot({b: pr_bins[b]})
        plt.show(p1)
        print(
            i,
            "/",
            len(pr_bins),
            "current splitting points:",
            list(pr_bins[b]["breaks"]),
        )
        print(("Adjust breaks for (%s)?\n 1.next\n 2.yes\n 3.back" % b))
        if_adj = input("1:next    2. yes    3.back  4.drop  5.quit\n")
        new_breaks = {}
        # stays None until an adjustment round completes successfully;
        # guards against a NameError when an error aborts the round
        breaks_output = None
        while if_adj == "2":
            print("pls enter the new breaks:")
            try:
                new_breaks_points = input().split(",")
                new_breaks = {b: sorted([float(x) for x in new_breaks_points])}
                bins_adj_temp = sdf_woebin(
                    data.loc[:, [b, ylabel]],
                    y=ylabel,
                    breaks_list=new_breaks,
                    special_values=special_values,
                )
                p2 = sdf_woebin_plot(bins_adj_temp)
                plt.show(p2)
                # strings ("nan"/specials) sort to the front of the list
                breaks_output = sorted(
                    list(bins_adj_temp[b]["breaks"]),
                    key=lambda x: -np.inf if isinstance(x, str) else x,
                )
                print("current splitting points:", breaks_output)
                print(("Adjust breaks for (%s)?\n 1.next\n 2.yes\n 3.back" % b))
                if_adj = input("1:save and next    2. yes    3.back  4.drop")
            except Exception as e:
                print(e)
                print("Error while adjusting", b)
                if_continue = input("to continue? \n 1.yes 2.next 3.quit")
                if if_continue == "1":
                    continue
                elif if_continue == "2":
                    # BUG FIX: was `if_adj == "1"` (a no-op comparison), so
                    # option 2 never left the adjustment loop
                    if_adj = "1"
                elif if_continue == "3":
                    break
        if if_adj == "1":
            # fall back to the original breaks unless an adjustment round
            # actually produced a new set
            if b not in new_breaks.keys() or breaks_output is None:
                all_breaklist[b] = list(pr_bins[b]["breaks"])
            else:
                all_breaklist[b] = breaks_output
            i += 1
            continue

        if if_adj == "3":
            if i == 0:
                print('This is the first plot, "back" option forbidden')
                break
            else:
                i -= 1
        if if_adj == "4":
            droplist.append(b)
            i += 1
        if if_adj == "5":
            break
    print("Mannual adjustment completed")
    return all_breaklist, droplist

def sdf_single_scorecard_ply(dx, cardx, x):
    """Score one feature column against its slice of the scorecard.

    `cardx` provides 'bin'/'points' pairs where bins are either pd.Interval
    objects (regular numeric bins) or scalars (special values such as "nan").
    `x` is kept only for interface compatibility. Returns a Series of points,
    special-valued rows first.
    """
    bin_points = dict(cardx[["bin", "points"]].to_dict("split")["data"])
    interval_points = {}
    special_points = {}
    for bin_key, pts in bin_points.items():
        if type(bin_key) == pd._libs.interval.Interval:
            interval_points[bin_key] = pts
        else:
            special_points[bin_key] = pts
    is_special = dx.isin(list(special_points))
    # Series.map with Interval keys resolves each value to its containing bin
    scored_special = dx[is_special].map(special_points)
    scored_interval = dx[~is_special].map(interval_points)
    return pd.concat([scored_special, scored_interval], axis=0)

def sdf_scorecard_ply(data, card, only_total_score=True, var_kp=None):
    """Apply a scorecard to raw data and compute the total score.

    :param data: DataFrame with the raw (untransformed) feature columns
    :param card: dict {variable: card DataFrame} including a 'basepoints' row
        (as produced by sc.scorecard)
    :param only_total_score: if True drop the per-variable point columns and
        keep only the 'score' column (plus `var_kp` columns, if any)
    :param var_kp: column name(s) from `data` to carry through unchanged
    :return: DataFrame with a 'score' column (= sum of variable points plus
        the base points)
    """
    cardx = pd.concat(card, ignore_index=True)
    dt = data.copy(deep=True)
    cols = list(cardx.variable.unique())
    cols.remove("basepoints")
    # missing values are scored through the "nan" special bin
    dt[cols] = dt[cols].fillna("nan")
    parallel_score = Parallel(n_jobs=-1)(
        delayed(sdf_single_scorecard_ply)(dt[v], cardx[cardx.variable == v], v)
        for v in cols
    )
    basepoints = cardx[cardx.variable == "basepoints"]["points"].values[0]
    # explicit branch instead of the former bare try/except, which silently
    # swallowed real errors (e.g. a misspelled var_kp column name)
    if var_kp is not None:
        data_score = pd.concat(parallel_score + [data[var_kp]], axis=1)
        data_score["score"] = (
            data_score.drop(columns=var_kp).sum(axis=1) + basepoints
        )
    else:
        data_score = pd.concat(parallel_score, axis=1)
        data_score["score"] = data_score.sum(axis=1) + basepoints

    if only_total_score:
        data_score.drop(columns=cols, inplace=True)
    return data_score

def iv(dt, y, x = None, positive='bad|1', order=True):
    """Compute the information value of each candidate variable against y.

    :param dt: DataFrame containing the label and candidate variables
    :param y: str or list, label column name
    :param x: str, list or None; restrict computation to these columns
        (None = every non-label column of `dt`)
    :param positive: kept for interface compatibility; unused here
    :param order: sort the result by info_value descending when True
    :return: DataFrame with columns ['variable', 'info_value']

    Example: ivlist = iv(dat, y='creditability')
    """
    dt = dt.copy(deep=True)
    if isinstance(y, str):
        y = [y]
    # NOTE: the original also tested `x is not None` here, but a str can
    # never be None, so that check was redundant
    if isinstance(x, str):
        x = [x]
    if x is not None:
        dt = dt[y + x]
    # x variable names
    xs = list(set(dt.columns) - set(y))
    # info_value per variable
    ivlist = pd.DataFrame({
        'variable': xs,
        'info_value': [iv_xy(dt[i], dt[y[0]]) for i in xs]
    }, columns=['variable', 'info_value'])
    # sorting iv
    if order:
        ivlist = ivlist.sort_values(by='info_value', ascending=False)
    return ivlist

def iv_xy(x, y):
    """Information value of a single feature `x` against binary label `y`.

    Values of x are treated as categories (cast to str, NaN -> 'nan'/'missing');
    zero good/bad counts are replaced with 0.9 to avoid log(0)/division by zero.
    """
    def _goodbad_counts(group):
        # per-category counts of good (y == 0) and bad (y == 1) samples
        return pd.Series({
            'good': (group['y'] == 0).sum(),
            'bad': (group['y'] == 1).sum(),
        })

    tab = (
        pd.DataFrame({'x': x.astype('str'), 'y': y})
        .fillna('missing')
        .groupby('x')
        .apply(_goodbad_counts)
        .replace(0, 0.9)
    )
    distr_bad = tab['bad'] / tab['bad'].sum()
    distr_good = tab['good'] / tab['good'].sum()
    bin_iv = (distr_bad - distr_good) * np.log(distr_bad / distr_good)
    return bin_iv.sum()

def report(
    data_total,
    data_train,
    data_test,
    data_oot=None,
    y="flagy",
    breaks_list=None,
    filename="",
    points0=600,
    pdo=50,
    odds0=1 / 19,
    basepoints_eq0=False,
    special_values=None,
    grey=2,
    score_range=(300, 1000),
    tick=50,
    percent=5,
    **kwargs):
    """
    :param data_total: dataframe 所有训练样本，不含验证集，包含入模变量、标签及申请日期，申请日期最好为日期格式，有灰客户样本
    :param data_train: dataframe 训练集，只包含入模变量及标签,不含灰客户
    :param data_test: dataframe 测试集，只包含入模变量及标签， 不含灰客户
    :param data_oot: dataframe or None 验证集，默认为None，不含灰客户
    :param y: str 标签名，默认为'flagy'
    :param breaks_list: dict or None 入模变量分箱节点，如{'a':[-inf,1,5,10, inf], 'b': [-inf,2,6,9, inf], ...}
    :param filename: str 报告名，默认为''，输出名称自动加'report_报告生成日期'后缀，如'反欺诈_report20201229113158'
    :param points0: int 基准分，默认600
    :param pdo: int pdo，默认50
    :param odds: float 坏账比率， 默认1/19
    :param basepoints_eq0: Bool 是否使得截距项为0，值为True时，截距项评分平分到每个入模变量评分上，为False时不平分，默认为False
    :param special_values: None or list 变量分箱时设置的特殊值，特殊值单独为一箱，默认为None，即缺失值为特殊值
    :param grey: int, float or str 灰客户的取值标识，默认取2
    :param score_range: tuple 评分的上下限，如 (300, 1000)
    :param tick: int or float 评分分布的分数间隔，默认为50
    :param percent: int or float 评分等频分布的分位数间隔，默认为5，即5%分位数
    :param kwargs: 其他变量,如user_date='user_date'：申请日期名称，出现在data_total中，主要用于报告第2部分样本分析
    :return: 返回1
    """
    # 文件名
    filename = filename + 'report_' + dtt.datetime.now().strftime('%Y-%m-%d-%H-%M')
    # 分箱计算woe
    if "user_date" in kwargs:
        var_final = data_train.drop(columns=[y, 'user_date']).columns.to_list()
    else:
        var_final = data_train.drop(columns=[y]).columns.to_list()

    bins = sdf_woebin(
        data_train[var_final + [y]],
        y,
        breaks_list=breaks_list,
        max_leaf_num=6,
        min_woe_box_percent=0.01,
        min_woe_box_num_min=100,
        special_values=special_values,
    )
    bins_test = sdf_woebin(
        data_test[var_final + [y]],
        y,
        breaks_list=breaks_list,
        max_leaf_num=6,
        min_woe_box_percent=0.01,
        min_woe_box_num_min=100,
        special_values=special_values,
    )
    bins_train1 = pd.concat(bins, ignore_index=True)
    bins_test1 = pd.concat(bins_test, ignore_index=True)
    try:
        bins_oot = sdf_woebin(
            data_oot[var_final + [y]],
            y,
            breaks_list=breaks_list,
            max_leaf_num=6,
            min_woe_box_percent=0.01,
            min_woe_box_num_min=100,
            special_values=special_values,
        )
        oot_woe = sdf_woebin_ply(data_oot[var_final + [y]], bins)
        bins_oot1 = pd.concat(bins_oot, ignore_index=True)
    except:
        pass
    train_woe = sdf_woebin_ply(data_train[var_final + [y]], bins)
    test_woe = sdf_woebin_ply(data_test[var_final + [y]], bins)
    lr = LogisticRegression(penalty="none", solver="newton-cg", n_jobs=-1) 
    lr.fit(train_woe.drop(columns=[y]), train_woe[y])
    card = sc.scorecard(
        bins,
        lr,
        var_final,
        points0=points0,
        pdo=pdo,
        odds0=odds0,
        basepoints_eq0=basepoints_eq0,
    )
    # 进行excel报告内容整理
    table = pd.ExcelWriter(filename + ".xlsx", engine="xlsxwriter")
    # ------------------------------------------------------------------------------------------------------------------
    # 目录页
    sheet = pd.DataFrame(
        columns=["编号", "中文简称", "英文简称", "内容"],
        data=[
            ["1", "模型使用说明", "Model_Explain", "模型使用说明"],
            ["2", "原始数据统计", "Original_Stat", "原始数据统计"],
            ["3", "衍生特征构造", "Var_derivation", "衍生特征构造"],
            ["4", "数据预处理-格式转换", "Data_Pre_Format", "数据预处理-格式转换"],
            ["5", "候选变量", "Candidate_Vars", "经IV筛选、Lasso筛选、手动筛选后的剩余变量"],
            ["6", "模型参数", "Model_Params", "模型参数"],
            ["7", "变量相关系数", "VarSelect_Corr", "入模变量相关系数"],
            ["8", "模型区分度评估", "Model_Disc", "模型区分度评估"],
            ["9", "变量分箱", "Var_bin", "变量分箱"],
            ["10", "模型评分卡", "Scorecard", "变量分数及评分参数设定"],
            ["11", "单变量稳定性", "Var_Stab", "单变量稳定性评估"],
            ["12", "模型稳定性评估", "Model_Stab", "模型稳定性评估"],
            ["13", "样本风险评分分布", "Model_Score", "模型评分及风险表现"],
            ["14", "评分决策表", "Decision_table", "不同评分分段的通过率、违约率提升"],
        ],
    )
    sheet.to_excel(table, sheet_name="目录", startrow=0, startcol=0, index=False)
    # -------------------------------------------------------------------------------------------------------------------
    # 1.模型使用说明页
    head = pd.DataFrame(columns=["返回目录"])
    sheet1 = pd.DataFrame(
        index=["版本名称", "模型类型", "客群种类", "该版本更新时间", "开发人员", "建模样本数据量", "模型变量数量", "核心算法"],
        columns=["内容"],
    )
    head.to_excel(table, sheet_name="1.模型使用说明", startrow=0, index=False)
    sheet1.to_excel(table, sheet_name="1.模型使用说明", startrow=1)
    # -------------------------------------------------------------------------------------------------------------------
    # 2.原始数据统计页
    head2_1 = pd.DataFrame(columns=["一、数据来源"])
    sheet2_1 = pd.DataFrame(
        index=[
            "机构",
            "产品类型",
            "业务开展时间",
            "引流渠道",
            "额度区间",
            "期数范围",
            "存量客户数量",
            "日进件量",
            "平均通过率",
            "审批流程",
            "审批使用数据",
        ],
        columns=["内容"],
    )
    head2_2 = pd.DataFrame(columns=["二、数据概要"])
    sheet2_2 = pd.DataFrame(
        index=[
            "客群描述",
            "观察期",
            "表现期",
            "原始样本时间",
            "原始样本量",
            "建模样本时间",
            "建模样本量",
            "验证样本时间",
            "验证样本量",
        ],
        columns=["内容"],
    )

    head2_3 = pd.DataFrame(columns=["三、好坏客户定义"])
    sheet2_3 = pd.DataFrame(columns=["客户类型", "定义方式", "样本量", "好坏客户定义描述"])
    sheet2_3["客户类型"] = ["坏客户", "灰客户", "好客户"]

    head2_4 = pd.DataFrame(columns=["四、建模数据统计情况"])
    sheet2_4 = pd.DataFrame(columns=["年", "月", "数量", "比例", "坏数量", "坏账率", "平均坏账率"])
    if "user_date" in kwargs:
        data_total['user_date'] = pd.to_datetime(data_total['user_date'], errors="coerce")
        temp = data_total.copy()
        temp["年"] = temp['user_date'].apply(lambda x: str(x.year))
        temp["月"] = temp['user_date'].apply(lambda x: str(x.month) + "月")
        temp = (
            temp[temp[y] != grey]
            .groupby(["年", "月"])[y]
            .agg([len, sum])
            .rename(columns={"len": "数量", "sum": "坏数量"})
            .reset_index()
        )
        temp["比例"] = temp["数量"] / temp["数量"].sum()
        temp["坏账率"] = temp["坏数量"] / temp["数量"]
        temp["平均坏账率"] = temp["坏数量"].sum() / temp["数量"].sum()
        temp = temp[["年","月","数量", "比例", "坏数量", "坏账率", "平均坏账率"]]
        sheet2_4 = temp.copy()

    head2_5 = pd.DataFrame(columns=["五、建模数据选取"])
    sheet2_5 = pd.DataFrame(columns=["类型", "年", "月", "数量", "比例", "坏数量", "坏账率", "平均坏账率"])
    sheet2_5["类型"] = ["训练", "测试", "验证"]
    if "user_date" in kwargs:
        data_train["user_date"] = pd.to_datetime(data_train["user_date"])
        data_test["user_date"] = pd.to_datetime(data_test["user_date"])
        data_train["类型"] = "训练"
        data_test["类型"] = "测试"
        try:
            data_oot["类型"] = "验证"
            data_oot["user_date"] = pd.to_datetime(data_oot["user_date"])
        except:
            pass
        data_merge = pd.concat([data_train, data_test, data_oot], axis=0, sort=False)
        data_merge["年"] = data_merge['user_date'].apply(lambda x: str(x.year))
        data_merge["月"] = data_merge['user_date'].apply(lambda x: str(x.month) + "月")
        data_merge = (
            data_merge.groupby(["类型", "年", "月"])[y]
            .agg([len, sum])
            .rename(columns={"len": "数量", "sum": "坏数量"})
            .reset_index()
        )
        data_merge["比例"] = data_merge["数量"] / data_merge["数量"].sum()
        data_merge["坏账率"] = data_merge["坏数量"] / data_merge["数量"]
        data_merge["平均坏账率"] = data_merge["坏数量"].sum() / data_merge["数量"].sum()
        data_merge = data_merge[["类型","年","月","数量", "比例", "坏数量", "坏账率", "平均坏账率"]]
        sheet2_5 = data_merge.copy()

    head2_6 = pd.DataFrame(columns=["六、数据集划分"])
    sheet2_6 = pd.DataFrame(columns=["数据量", "坏样本", "坏账率"], index=["训练集", "测试集", "验证集"])
    try:
        sheet2_6["数据量"] = [data_train.shape[0], data_test.shape[0], data_oot.shape[0]]
        sheet2_6["坏样本"] = [data_train[y].sum(), data_test[y].sum(), data_oot[y].sum()]
        sheet2_6["坏账率"] = sheet2_6["坏样本"] / sheet2_6["数据量"]
    except:
        sheet2_6["数据量"] = [data_train.shape[0], data_test.shape[0], 0]
        sheet2_6["坏样本"] = [data_train[y].sum(), data_test[y].sum(), 0]
        sheet2_6["坏账率"] = sheet2_6["坏样本"] / sheet2_6["数据量"]

    head.to_excel(table, sheet_name="2.原始数据统计", startrow=0, index=False)
    head2_1.to_excel(table, sheet_name="2.原始数据统计", startrow=1, index=False)
    sheet2_1.to_excel(table, sheet_name="2.原始数据统计", startrow=3, startcol=1)
    head2_2.to_excel(table, sheet_name="2.原始数据统计", startrow=16, index=False)
    sheet2_2.to_excel(table, sheet_name="2.原始数据统计", startrow=18, startcol=1)
    head2_3.to_excel(table, sheet_name="2.原始数据统计", startrow=29, index=False)
    sheet2_3.to_excel(
        table, sheet_name="2.原始数据统计", startrow=31, startcol=1, index=False
    )
    head2_4.to_excel(table, sheet_name="2.原始数据统计", startrow=36, index=False)
    if sheet2_4.shape[0] == 0:
        sheet2_4.to_excel(
            table, sheet_name="2.原始数据统计", startrow=38, startcol=1, index=False
        )
    else:
        sheet2_4.to_excel(table, sheet_name="2.原始数据统计", startrow=38, startcol=1)
    row_number = sheet2_4.shape[0] + 38 + 17
    head2_5.to_excel(table, sheet_name="2.原始数据统计", startrow=row_number, index=False)
    sheet2_5.to_excel(table, sheet_name="2.原始数据统计", startrow=row_number + 2, startcol=1)
    row_number1 = row_number + 2 + sheet2_5.shape[0] + 2
    head2_6.to_excel(table, sheet_name="2.原始数据统计", startrow=row_number1, index=False)
    sheet2_6.to_excel(
        table, sheet_name="2.原始数据统计", startrow=row_number1 + 2, startcol=1
    )
    # ---------------------------------------------------------------------------------------------------
    # 3.衍生变量构造
    sheet3 = pd.DataFrame(columns=["序号", "模块", "变量", "中文名", "数据来源", "衍生逻辑"])

    head.to_excel(table, sheet_name="3.衍生变量构造", index=False)
    sheet3.to_excel(table, sheet_name="3.衍生变量构造", startrow=2, index=False)
    # ----------------------------------------------------------------------------------------------------
    # 4.数据预处理
    sheet4 = pd.DataFrame(columns=["序号", "变量", "数据源", "变量类型", "编码(转换)格式", "举例"])
    head.to_excel(table, sheet_name="4.数据预处理", index=False)
    sheet4.to_excel(table, sheet_name="4.数据预处理", startrow=2, index=False)
    # -----------------------------------------------------------------------------------------------------
    # 5.候选变量
    title = pd.DataFrame(columns=["候选变量"])
    try:
        sheet5 = pd.read_csv("iv_missing_rate.csv", encoding="utf-8")
        sheet5.columns = ["变量", "IV值", "缺失值占比"]
        sheet5.sort_values(by="IV值", ascending=False, inplace=True)
        sheet5["解释"] = ""
        sheet5["序号"] = list(range(1, sheet5.shape[0] + 1))
        sheet5 = sheet5[["序号", "变量", "解释", "IV值", "缺失值占比"]]
    except:
        sheet5 = pd.DataFrame(columns=["序号", "变量", "解释", "IV值", "缺失值占比"])

    tips = pd.DataFrame(
        columns=["tip"], data=["计算变量分箱IV值，选择IV值>=0.02的变量", "其中重要经验变量单独挑回"]
    )
    head.to_excel(table, sheet_name="5.候选变量", index=False)
    title.to_excel(table, sheet_name="5.候选变量", index=False, startrow=1)
    sheet5.to_excel(table, sheet_name="5.候选变量", index=False, startrow=2)
    tips.to_excel(
        table, sheet_name="5.候选变量", index=False, header=False, startrow=3, startcol=6
    )
    # -------------------------------------------------------------------------------------------------------
    # 6.模型参数
    # 统计检验
    model = sm.GLM(
        train_woe[y],
        exog=sm.add_constant(train_woe.drop(columns=["flagy"])),
        family=sm.families.Binomial(),
    ).fit()
    stats = pd.concat(
        [model.params, model.bse, model.tvalues, model.pvalues], axis=1, sort=False
    )
    stats.reset_index(drop=False, inplace=True)
    stats.columns = ["变量", "估计", "标准误差", "Z-Value", "P-Value"]
    stats["变量"] = stats["变量"].apply(lambda x: x.replace("_woe", ""))
    stats.set_index("变量", inplace=True)
    stats["序号"] = list(range(stats.shape[0]))
    try:
        iv_missing_rate = sheet5[sheet5.变量.isin(var_final)]
        del iv_missing_rate["序号"]
    except:
        iv_missing_rate = pd.DataFrame(columns=["变量", "解释", "IV值", "缺失值占比"])
    iv_missing_rate.set_index("变量", inplace=True)
    # VIF值
    xs = np.array(sm.add_constant(train_woe.drop(columns=[y])), dtype=np.float)
    xs_name = ["const"] + train_woe.drop(columns=[y]).columns.to_list()  # 需要求VIF的变量
    vif = pd.DataFrame(
        [
            {"变量": xs_name[i], "VIF": oi.variance_inflation_factor(xs, i)}
            for i in range(len(xs_name))
        ]
    )
    vif = vif[vif.变量 != "const"]
    vif["变量"] = vif["变量"].apply(lambda x: x.replace("_woe", ""))
    vif.set_index("变量", inplace=True)
    sheet6 = pd.concat([stats, vif, iv_missing_rate], axis=1)
    sheet6.reset_index(inplace=True)
    sheet6.rename(columns={"index": "变量"}, inplace=True)
    sheet6 = sheet6[
        ["序号", "变量", "解释", "估计", "标准误差", "Z-Value", "P-Value", "VIF", "IV值", "缺失值占比"]
    ]
    sheet6.sort_values(by="序号", ascending=True, inplace=True)

    head.to_excel(table, sheet_name="6.模型参数", index=False)
    title = pd.DataFrame(columns=["逻辑回归结果"])
    title.to_excel(table, sheet_name="6.模型参数", index=False, startrow=1)
    sheet6.to_excel(table, sheet_name="6.模型参数", index=False, startrow=2)
    # ------------------------------------------------------------------------------------------------
    # 7.变量相关系数
    sheet7_1 = pd.DataFrame(columns=["序号", "分箱方法"], data=[["1", "最优分箱"], ["2", "手动分箱"]])
    sheet7_2 = pd.DataFrame(
        np.corrcoef(train_woe.drop(columns=[y]), rowvar=False),
        columns=train_woe.drop(columns=[y]).columns,
        index=train_woe.drop(columns=[y]).columns,
    )
    title1 = pd.DataFrame(columns=["建模过程中所使用的分箱方法"])
    title2 = pd.DataFrame(columns=["多变量分析——相关系数"])
    title3 = pd.DataFrame(columns=["Pearson Correlation Coefficient"])
    head.to_excel(table, sheet_name="7.变量相关系数", index=False)
    title1.to_excel(table, sheet_name="7.变量相关系数", index=False, startrow=2)
    sheet7_1.to_excel(table, sheet_name="7.变量相关系数", index=False, startrow=3)
    title2.to_excel(table, sheet_name="7.变量相关系数", index=False, startrow=10)
    title3.to_excel(table, sheet_name="7.变量相关系数", index=False, startrow=11)
    sheet7_2.to_excel(table, sheet_name="7.变量相关系数", startrow=12)
    # ---------------------------------------------------------------------------------------------------
    # 8.模型区分度评估
    title = pd.DataFrame(columns=["模型区分度评估"])
    sheet8 = pd.DataFrame(columns=["评估指标", "训练集", "测试集", "验证集"])
    sheet8["评估指标"] = ["KS", "AUC"]
    # 计算KS、AUC
    # 预测概率
    train_pred = lr.predict_proba(train_woe.drop(columns=[y]))[:, 1]
    test_pred = lr.predict_proba(test_woe.drop(columns=[y]))[:, 1]
    try:
        oot_pred = lr.predict_proba(oot_woe.drop(columns=[y]))[:, 1]
    except:
        pass
    # 评估
    train_perf = sc.perf_eva(train_woe.flagy, train_pred, title="train")
    test_perf = sc.perf_eva(test_woe.flagy, test_pred, title="test")
    train_perf["pic"].savefig("train_KS_AUC.png", bbox_inches="tight")
    test_perf["pic"].savefig("test_KS_AUC.png", bbox_inches="tight")
    sheet8["训练集"] = [train_perf["KS"], train_perf["AUC"]]
    sheet8["测试集"] = [test_perf["KS"], test_perf["AUC"]]
    try:
        oot_perf = sc.perf_eva(oot_woe.flagy, oot_pred, title="oot")
        oot_perf["pic"].savefig("oot_KS_AUC.png", bbox_inches="tight")
        sheet8["验证集"] = [oot_perf["KS"], oot_perf["AUC"]]
    except:
        pass
    title1 = pd.DataFrame(
        columns=[
            "此次建模，训练样本KS={}，AUC={}，模型结果较理想，模型对好坏客户具有很好的区分度，且模型较稳定，达到建模预期目标".format(
                train_perf["KS"], train_perf["AUC"]
            )
        ]
    )
    title2 = pd.DataFrame(columns=["训练集", "KS={}".format(train_perf["KS"])])
    title3 = pd.DataFrame(columns=["测试集", "KS={}".format(test_perf["KS"])])
    title1.to_excel(table, sheet_name="8.模型区分度评估", index=False, startrow=8)
    title2.to_excel(table, sheet_name="8.模型区分度评估", index=False, startrow=10, startcol=3)
    title3.to_excel(
        table, sheet_name="8.模型区分度评估", index=False, startrow=10, startcol=11
    )
    try:
        title4 = pd.DataFrame(columns=["验证集", "KS={}".format(oot_perf["KS"])])
        title4.to_excel(
            table, sheet_name="8.模型区分度评估", index=False, startrow=10, startcol=19
        )
    except:
        pass

    head.to_excel(table, sheet_name="8.模型区分度评估", index=False)
    sheet8.to_excel(table, sheet_name="8.模型区分度评估", index=False, startrow=2)
    # 曲线图
    sheet = table.book.sheetnames["8.模型区分度评估"]
    sheet.insert_image("A12", "train_KS_AUC.png", {"x_scale": 0.9, "y_scale": 0.9})
    sheet.insert_image("I12", "test_KS_AUC.png", {"x_scale": 0.9, "y_scale": 0.9})
    try:
        sheet.insert_image("Q12", "oot_KS_AUC.png", {"x_scale": 0.9, "y_scale": 0.9})
    except:
        pass
    # -----------------------------------------------------------------------------------------------------
    # 9.变量分箱
    title = pd.DataFrame(columns=["模型变量分箱及打分"])
    sheet9 = pd.merge(
        bins_train1[["variable", "bin", "count_distr", "badprob"]],
        bins_test1[["variable", "bin", "count_distr", "badprob"]],
        how="outer",
        on=["variable", "bin"],
    )
    sheet9["解释"] = ""
    try:
        sheet9 = pd.merge(
            sheet9,
            bins_oot1[["variable", "bin", "count_distr", "badprob"]],
            how="outer",
            on=["variable", "bin"],
        )
        sheet9.rename(
            columns={
                "count_distr_x": "训练集区间占比",
                "count_distr_y": "测试集区间占比",
                "count_distr": "验证集区间占比",
                "badprob_x": "训练集区间坏客率",
                "badprob_y": "测试集区间坏客率",
                "badprob": "验证集区间坏客率",
            },
            inplace=True,
        )
        title1 = pd.DataFrame(
            columns=[
                "序号",
                "名称",
                "解释",
                "分箱",
                "训练集区间占比",
                "训练集区间坏客率",
                "测试集区间占比",
                "测试集区间坏客率",
                "验证集区间占比",
                "验证集区间坏客率",
            ]
        )
        sheet9 = sheet9[
            [
                "variable",
                "解释",
                "bin",
                "训练集区间占比",
                "训练集区间坏客率",
                "测试集区间占比",
                "测试集区间坏客率",
                "验证集区间占比",
                "验证集区间坏客率",
            ]
        ]
        oot_pict = sdf_woebin_plot(bins_oot)
    except:
        sheet9.rename(
            columns={
                "count_distr_x": "训练集区间占比",
                "count_distr_y": "测试集区间占比",
                "badprob_x": "训练集区间坏客率",
                "badprob_y": "测试集区间坏客率",
            },
            inplace=True,
        )
        title1 = pd.DataFrame(
            columns=[
                "序号",
                "名称",
                "解释",
                "分箱",
                "训练集区间占比",
                "训练集区间坏客率",
                "测试集区间占比",
                "测试集区间坏客率",
            ]
        )
        sheet9 = sheet9[
            ["variable", "解释", "bin", "训练集区间占比", "训练集区间坏客率", "测试集区间占比", "测试集区间坏客率"]
        ]

    # Write sheet "9.变量分箱": header rows, then the binning table, then one
    # WOE-binning plot per variable for each sample (train / test / OOT).
    head.to_excel(table, sheet_name="9.变量分箱", index=False)
    title.to_excel(table, sheet_name="9.变量分箱", index=False, startrow=1)
    title1.to_excel(table, sheet_name="9.变量分箱", index=False, startrow=2)
    sheet9.to_excel(
        table, sheet_name="9.变量分箱", index=False, startrow=5, startcol=1, header=False
    )
    # NOTE(review): indexing .book.sheetnames assumes `table` is a pandas
    # ExcelWriter using the xlsxwriter engine (sheetnames is its name->sheet
    # dict) — confirm the writer engine.
    sheet = table.book.sheetnames["9.变量分箱"]
    train_pict = sdf_woebin_plot(bins)
    test_pict = sdf_woebin_plot(bins_test)

    # Save each plot as a PNG in the working directory, then embed it; rows
    # advance by 20 per image, columns L / T / AB hold train / test / OOT.
    i = 3
    for pict in list(sheet9.variable.unique()):
        train_pict[pict].get_figure().savefig(
            "train_" + pict + ".png", bbox_inches="tight"
        )
        sheet.insert_image(
            "L" + str(i), "train_" + pict + ".png", {"x_scale": 0.6, "y_scale": 0.6}
        )
        i = i + 20
    i = 3
    for pict in list(sheet9.variable.unique()):
        test_pict[pict].get_figure().savefig(
            "test_" + pict + ".png", bbox_inches="tight"
        )
        sheet.insert_image(
            "T" + str(i), "test_" + pict + ".png", {"x_scale": 0.6, "y_scale": 0.6}
        )
        i = i + 20
    try:
        # oot_pict only exists when an OOT sample was processed above.
        i = 3
        for pict in list(sheet9.variable.unique()):
            oot_pict[pict].get_figure().savefig(
                "oot_" + pict + ".png", bbox_inches="tight"
            )
            sheet.insert_image(
                "AB" + str(i), "oot_" + pict + ".png", {"x_scale": 0.6, "y_scale": 0.6}
            )
            i = i + 20
    except:
        pass
    # ----------------------------------------------------------------------------------------------------------
    # 10. Model scorecard sheet: per-bin coefficients/WOE/points, variable
    # contribution, and the scorecard scaling parameters.
    title1 = pd.DataFrame(columns=["模型变量分箱及打分"])
    title2 = pd.DataFrame(columns=["序号", "名称", "变量解释", "分箱", "系数", "woe", "评分"])
    title2["序号"] = ["0"]
    title2["名称"] = ["Intercept"]
    title2["变量解释"] = ["截距"]
    sheet10 = bins_train1[["variable", "bin", "woe"]]
    # `card` is the scorecard dict/list produced upstream; flatten to one frame.
    card_plain = pd.concat(card, ignore_index=True)
    # Attach the fitted coefficient of each variable (sheet6 built earlier).
    sheet10 = pd.merge(
        sheet10, sheet6[["变量", "估计"]], how="inner", left_on="variable", right_on="变量"
    )
    # Right merge keeps every scorecard row (including the intercept-only rows).
    sheet10 = pd.merge(sheet10, card_plain, how="right", on=["variable", "bin"])
    sheet10["解释"] = ""
    sheet10 = sheet10[["variable", "解释", "bin", "估计", "woe", "points"]]
    sheet10.columns = ["变量", "解释", "分箱", "系数", "woe", "评分"]
    title3 = pd.DataFrame(columns=["评分卡变量贡献度"])
    # Contribution of each variable = its point span / total point span.
    sheet10_1 = (
        card_plain.groupby("variable")["points"]
        .agg([max, min])
        .rename(columns={"max": "最大分值", "min": "最小分值"})
    )
    sheet10_1["贡献度"] = (sheet10_1["最大分值"] - sheet10_1["最小分值"]) / (
        sheet10_1["最大分值"] - sheet10_1["最小分值"]
    ).sum()
    sheet10_1["中文名称"] = ""
    sheet10_1.reset_index(inplace=True)
    sheet10_1 = sheet10_1[["variable", "中文名称", "最大分值", "最小分值", "贡献度"]]
    title4 = pd.DataFrame(columns=["评分卡刻度计算方法"])
    # Scorecard scaling: odds at the base score, base points, points-to-double-odds.
    sheet10_2 = pd.DataFrame(
        {"a": ["odds0", "basepoints", "pdo"], "b": [odds0, points0, pdo]}
    )
    head.to_excel(table, sheet_name="10.模型评分卡", index=False)
    title1.to_excel(table, sheet_name="10.模型评分卡", index=False, startrow=1)
    title2.to_excel(table, sheet_name="10.模型评分卡", index=False, startrow=2)
    sheet10.to_excel(
        table, sheet_name="10.模型评分卡", index=False, startrow=4, startcol=1, header=False
    )
    title3.to_excel(table, sheet_name="10.模型评分卡", index=False, startrow=1, startcol=8)
    sheet10_1.to_excel(
        table, sheet_name="10.模型评分卡", index=False, startrow=2, startcol=8
    )
    title4.to_excel(table, sheet_name="10.模型评分卡", index=False, startrow=1, startcol=14)
    sheet10_2.to_excel(
        table, sheet_name="10.模型评分卡", index=False, startrow=2, startcol=14
    )
    # -------------------------------------------------------------------------------------------------------------------------
    # 11. Single-variable stability: per-bin PSI train vs test, and (if an
    # OOT sample exists) train vs OOT.
    head.to_excel(table, sheet_name="11.单变量稳定性评估", index=False)
    title1 = pd.DataFrame(columns=["单变量稳定性评估"])
    title1.to_excel(table, sheet_name="11.单变量稳定性评估", index=False, startrow=1)
    title1.to_excel(
        table, sheet_name="11.单变量稳定性评估", index=False, startrow=1, startcol=9
    )
    # Outer join on (variable, bin); merge suffixes _x/_y = train/test.
    bins_traintest = pd.merge(
        bins_train1[["variable", "bin", "count", "count_distr"]],
        bins_test1[["variable", "bin", "count", "count_distr"]],
        how="outer",
        on=["variable", "bin"],
    )
    # Per-bin PSI term: (p_train - p_test) * ln(p_train / p_test).
    bins_traintest["PSIvar"] = (
        bins_traintest["count_distr_x"] - bins_traintest["count_distr_y"]
    ) * np.log(bins_traintest["count_distr_x"] / bins_traintest["count_distr_y"])
    # Total PSI per variable, broadcast back to every bin row.
    bins_traintest = bins_traintest.assign(
        PSI_ALL=bins_traintest.groupby("variable")["PSIvar"].transform(sum)
    )
    bins_traintest = bins_traintest[
        [
            "variable",
            "bin",
            "count_x",
            "count_distr_x",
            "count_y",
            "count_distr_y",
            "PSIvar",
            "PSI_ALL",
        ]
    ]
    bins_traintest.rename(
        columns={
            "variable": "变量名称",
            "bin": "分箱",
            "count_x": "样本量(训练)",
            "count_distr_x": "占比(训练)",
            "count_y": "样本量(测试)",
            "count_distr_y": "占比(测试)",
        },
        inplace=True,
    )
    bins_traintest.to_excel(table, sheet_name="11.单变量稳定性评估", index=False, startrow=3)
    try:
        # Same computation for train vs OOT; bins_oot1 only exists when an
        # OOT sample was binned upstream, hence the broad try/except.
        bins_trainoot = pd.merge(
            bins_train1[["variable", "bin", "count", "count_distr"]],
            bins_oot1[["variable", "bin", "count", "count_distr"]],
            how="outer",
            on=["variable", "bin"],
        )
        bins_trainoot["PSIvar"] = (
            bins_trainoot["count_distr_x"] - bins_trainoot["count_distr_y"]
        ) * np.log(bins_trainoot["count_distr_x"] / bins_trainoot["count_distr_y"])
        bins_trainoot = bins_trainoot.assign(
            PSI_ALL=bins_trainoot.groupby("variable")["PSIvar"].transform(sum)
        )
        bins_trainoot = bins_trainoot[
            [
                "variable",
                "bin",
                "count_x",
                "count_distr_x",
                "count_y",
                "count_distr_y",
                "PSIvar",
                "PSI_ALL",
            ]
        ]
        bins_trainoot.rename(
            columns={
                "variable": "变量名称",
                "bin": "分箱",
                "count_x": "样本量(训练)",
                "count_distr_x": "占比(训练)",
                "count_y": "样本量(验证)",
                "count_distr_y": "占比(验证)",
            },
            inplace=True,
        )
        bins_trainoot.to_excel(
            table, sheet_name="11.单变量稳定性评估", index=False, startrow=3, startcol=11
        )
    except:
        pass
    # -------------------------------------------------------------------------------------------------------------------------
    # 12. Model stability: score-band PSI between train and test, with both
    # equal-width ("等间距") and equal-frequency ("等频") score bands.
    title1 = pd.DataFrame(columns=["1.训练&测试"])
    title2_1 = pd.DataFrame(columns=["等间距", "模型样本量分布评估"])
    title2_2 = pd.DataFrame(columns=["等频", "模型样本量分布评估"])
    head.to_excel(table, sheet_name="12.模型稳定性评估", index=False)
    title1.to_excel(table, sheet_name="12.模型稳定性评估", index=False, startrow=1)
    title2_1.to_excel(table, sheet_name="12.模型稳定性评估", index=False, startrow=2)
    title2_2.to_excel(
        table, sheet_name="12.模型稳定性评估", index=False, startrow=2, startcol=11
    )
    # Score each sample with the fitted card (helper defined elsewhere).
    score_train = sdf_scorecard_ply(data_train, card, var_kp=[y])
    score_test = sdf_scorecard_ply(data_test, card, var_kp=[y])
    try:
        # data_oot may not exist; score_oot is then simply undefined.
        score_oot = sdf_scorecard_ply(data_oot, card, var_kp=[y])
    except:
        pass
    # Build the tables --------------
    # Train & test -------
    # Equal-width score bands -----
    # Training set: drop grey samples, clamp scores into
    # [score_range[0], score_range[1] - 1], then cut into fixed-width bands.
    df = score_train[score_train[y] != grey]
    df.loc[df.score < score_range[0], "score"] = score_range[0]
    df.loc[df.score >= score_range[1], "score"] = score_range[1] - 1
    df["score"] = pd.cut(
        df.score, bins=range(score_range[0], score_range[1] + 1, tick), right=False
    )
    # Pivot to one row per score band, one column per flag value (0=good, 1=bad).
    df = df.groupby(by=["score", y]).size().unstack(level=[1], fill_value=0)
    df = df.fillna(0)
    df.sort_index(ascending=True, inplace=True)
    df["训练样本量"] = df[1] + df[0]
    df["训练集占比"] = df["训练样本量"] / df["训练样本量"].sum()
    df["训练坏客户数"] = df[1]
    df["训练坏客户占比"] = df["训练坏客户数"] / df["训练坏客户数"].sum()
    del df[0], df[1]
    # Test set (same transformation).
    df1 = score_test[score_test[y] != grey]
    df1.loc[df1.score < score_range[0], "score"] = score_range[0]
    df1.loc[df1.score >= score_range[1], "score"] = score_range[1] - 1
    df1["score"] = pd.cut(
        df1.score, bins=range(score_range[0], score_range[1] + 1, tick), right=False
    )
    # NOTE(review): groups by the literal "flagy" here but by `y` above —
    # presumably y == "flagy"; confirm against the caller.
    df1 = df1.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    df1 = df1.fillna(0)
    df1.sort_index(ascending=True, inplace=True)
    df1["测试样本量"] = df1[1] + df1[0]
    df1["测试集占比"] = df1["测试样本量"] / df1["测试样本量"].sum()
    df1["测试坏客户数"] = df1[1]
    df1["测试坏客户占比"] = df1["测试坏客户数"] / df1["测试坏客户数"].sum()
    del df1[0], df1[1]
    # Join the two tables and compute PSI per score band (volume and bad-rate).
    sheet12_1 = df.merge(df1, how="outer", on="score")
    sheet12_1 = sheet12_1.fillna(0)
    sheet12_1["psi"] = (sheet12_1["训练集占比"] - sheet12_1["测试集占比"]) * np.log(
        sheet12_1["训练集占比"] / sheet12_1["测试集占比"]
    )
    sheet12_1["psi_bad"] = (sheet12_1["训练坏客户占比"] - sheet12_1["测试坏客户占比"]) * np.log(
        sheet12_1["训练坏客户占比"] / sheet12_1["测试坏客户占比"]
    )

    # Equal-frequency score bands ------
    # Training set: band edges are score percentiles of the training sample.
    dt = score_train[score_train[y] != grey]
    dt.loc[dt.score < score_range[0], "score"] = score_range[0]
    dt.loc[dt.score >= score_range[1], "score"] = score_range[1] - 1
    dt.sort_values(by="score", ascending=True, inplace=True)
    percent_list = list(range(0, 100 + percent, percent))
    breaks = np.percentile(dt.score.values, percent_list)
    # Deduplicate, truncate to int, and pin the endpoints to the score range.
    breaks = [int(i) for i in list(set(breaks))]
    breaks = sorted(breaks)
    breaks[-1] = score_range[1]
    breaks[0] = score_range[0]
    dt["score"] = pd.cut(dt.score, bins=breaks, right=False)
    dt = dt.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    dt = dt.fillna(0)
    dt.sort_index(ascending=True, inplace=True)
    dt["训练样本量"] = dt[1] + dt[0]
    dt["训练集占比"] = dt["训练样本量"] / dt["训练样本量"].sum()
    dt["训练坏客户数"] = dt[1]
    dt["训练坏客户占比"] = dt["训练坏客户数"] / dt["训练坏客户数"].sum()
    del dt[0], dt[1]
    # Test set, cut with the training-derived breaks.
    df1 = score_test[score_test[y] != grey]
    df1.loc[df1.score < score_range[0], "score"] = score_range[0]
    df1.loc[df1.score >= score_range[1], "score"] = score_range[1] - 1
    df1.sort_values(by="score", ascending=True, inplace=True)
    df1["score"] = pd.cut(df1.score, bins=breaks, right=False)
    df1 = df1.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    df1 = df1.fillna(0)
    df1.sort_index(ascending=True, inplace=True)
    df1["测试样本量"] = df1[1] + df1[0]
    df1["测试集占比"] = df1["测试样本量"] / df1["测试样本量"].sum()
    df1["测试坏客户数"] = df1[1]
    df1["测试坏客户占比"] = df1["测试坏客户数"] / df1["测试坏客户数"].sum()
    del df1[0], df1[1]
    # Join and compute PSI for the equal-frequency bands.
    sheet12_2 = dt.merge(df1, how="outer", on="score")
    sheet12_2 = sheet12_2.fillna(0)
    sheet12_2["psi"] = (sheet12_2["训练集占比"] - sheet12_2["测试集占比"]) * np.log(
        sheet12_2["训练集占比"] / sheet12_2["测试集占比"]
    )
    sheet12_2["psi_bad"] = (sheet12_2["训练坏客户占比"] - sheet12_2["测试坏客户占比"]) * np.log(
        sheet12_2["训练坏客户占比"] / sheet12_2["测试坏客户占比"]
    )

    sheet12_1.to_excel(table, sheet_name="12.模型稳定性评估", startrow=4)
    sheet12_2.to_excel(table, sheet_name="12.模型稳定性评估", startrow=4, startcol=12)
    # Row offset for the optional train-vs-OOT section written below.
    row_number = max(sheet12_2.shape[0], sheet12_1.shape[0]) + 4 + 20 + 2

    # OOT (validation) sample, if one exists. score_oot is only defined when
    # sdf_scorecard_ply(data_oot, ...) succeeded above, so a NameError here
    # means "no OOT" and the whole section is skipped via the broad except.
    try:
        # Equal-width score bands for the validation sample.
        df2 = score_oot[score_oot[y] != grey]
        df2.loc[df2.score < score_range[0], "score"] = score_range[0]
        df2.loc[df2.score >= score_range[1], "score"] = score_range[1] - 1
        df2["score"] = pd.cut(
            df2.score, bins=range(score_range[0], score_range[1] + 1, tick), right=False
        )
        df2 = df2.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
        df2 = df2.fillna(0)
        df2.sort_index(ascending=True, inplace=True)
        df2["验证样本量"] = df2[1] + df2[0]
        df2["验证集占比"] = df2["验证样本量"] / df2["验证样本量"].sum()
        df2["验证坏客户数"] = df2[1]
        df2["验证坏客户占比"] = df2["验证坏客户数"] / df2["验证坏客户数"].sum()
        del df2[0], df2[1]
        # Join with the train table (df) and compute per-band PSI.
        sheet12_1 = df.merge(df2, how="outer", on="score")
        sheet12_1 = sheet12_1.fillna(0)
        sheet12_1["psi"] = (sheet12_1["训练集占比"] - sheet12_1["验证集占比"]) * np.log(
            sheet12_1["训练集占比"] / sheet12_1["验证集占比"]
        )
        # Fix: the original compared the validation bad-rate column against
        # itself, so psi_bad was identically 0. Compare train vs validation,
        # mirroring the equal-frequency computation below.
        sheet12_1["psi_bad"] = (sheet12_1["训练坏客户占比"] - sheet12_1["验证坏客户占比"]) * np.log(
            sheet12_1["训练坏客户占比"] / sheet12_1["验证坏客户占比"]
        )
        # Equal-frequency score bands (breaks derived from the training set above).
        df2 = score_oot[score_oot[y] != grey]
        df2.loc[df2.score < score_range[0], "score"] = score_range[0]
        df2.loc[df2.score >= score_range[1], "score"] = score_range[1] - 1
        df2["score"] = pd.cut(df2.score, bins=breaks, right=False)
        df2 = df2.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
        df2 = df2.fillna(0)
        df2.sort_index(ascending=True, inplace=True)
        df2["验证样本量"] = df2[1] + df2[0]
        df2["验证集占比"] = df2["验证样本量"] / df2["验证样本量"].sum()
        df2["验证坏客户数"] = df2[1]
        df2["验证坏客户占比"] = df2["验证坏客户数"] / df2["验证坏客户数"].sum()
        del df2[0], df2[1]
        # Join with the train equal-frequency table (dt) and compute PSI.
        sheet12_2 = dt.merge(df2, how="outer", on="score")
        sheet12_2 = sheet12_2.fillna(0)
        sheet12_2["psi"] = (sheet12_2["训练集占比"] - sheet12_2["验证集占比"]) * np.log(
            sheet12_2["训练集占比"] / sheet12_2["验证集占比"]
        )
        sheet12_2["psi_bad"] = (sheet12_2["训练坏客户占比"] - sheet12_2["验证坏客户占比"]) * np.log(
            sheet12_2["训练坏客户占比"] / sheet12_2["验证坏客户占比"]
        )
        # Write the train-vs-OOT section below the train-vs-test one.
        title1 = pd.DataFrame(columns=["2.训练&验证"])
        title1.to_excel(
            table, sheet_name="12.模型稳定性评估", index=False, startrow=row_number
        )
        title2_1.to_excel(
            table, sheet_name="12.模型稳定性评估", index=False, startrow=row_number + 1
        )
        title2_2.to_excel(
            table,
            sheet_name="12.模型稳定性评估",
            index=False,
            startrow=row_number + 1,
            startcol=12,
        )

        sheet12_1.to_excel(table, sheet_name="12.模型稳定性评估", startrow=row_number + 3)
        sheet12_2.to_excel(
            table, sheet_name="12.模型稳定性评估", startrow=row_number + 3, startcol=12
        )
    except:
        pass
    # -----------------------------------------------------------------------------------------------------------------
    # 13. Score distribution per sample: band counts, bad rate, cumulative
    # good/bad shares and KS, for equal-width and equal-frequency bands.
    title1 = pd.DataFrame(columns=["1、等高分布"])
    title2 = pd.DataFrame(columns=["分数整体分布情况-训练集"])
    title3 = pd.DataFrame(columns=["分数整体分布情况-测试集"])
    title4 = pd.DataFrame(columns=["分数整体分布情况-验证集"])
    head.to_excel(table, sheet_name="13.样本风险评分分布", index=False)
    title1.to_excel(table, sheet_name="13.样本风险评分分布", index=False, startrow=1)
    title2.to_excel(table, sheet_name="13.样本风险评分分布", index=False, startrow=2)
    title3.to_excel(table, sheet_name="13.样本风险评分分布", index=False, startrow=19)

    # Equal-width bands ---------
    # Combined train + test sample.
    score_total = pd.concat([score_train,score_test],axis = 0,sort = False)
    df = score_total[score_total[y] != grey]
    df.loc[df.score < score_range[0], "score"] = score_range[0]
    df.loc[df.score >= score_range[1], "score"] = score_range[1] - 1
    df["score"] = pd.cut(
        df.score, bins=range(score_range[0], score_range[1] + 1, tick), right=False
    )
    df = df.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    df = df.fillna(0)
    # Band metrics: counts, shares, bad rate, cumulative shares, KS.
    df["区间人数"] = df[0] + df[1]
    df["区间占比"] = df["区间人数"] / df["区间人数"].sum()
    df["区间坏客户率"] = df[1] / df["区间人数"]
    df["累计坏客户占比"] = df[1].cumsum() / df[1].sum()
    df["累计好客户占比"] = df[0].cumsum() / df[0].sum()
    df["好坏区分程度(ks)"] = df["累计坏客户占比"] - df["累计好客户占比"]
    df.reset_index(inplace=True)
    df.rename(columns={"score": "评分区间"}, inplace=True)
    del df[0], df[1]
    df.to_excel(table, sheet_name="13.样本风险评分分布", index=False, startrow=3,startcol=11)

    # Training set (same metrics).
    df = score_train[score_train[y] != grey]
    df.loc[df.score < score_range[0], "score"] = score_range[0]
    df.loc[df.score >= score_range[1], "score"] = score_range[1] - 1
    df["score"] = pd.cut(
        df.score, bins=range(score_range[0], score_range[1] + 1, tick), right=False
    )
    df = df.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    df = df.fillna(0)
    df["区间人数"] = df[0] + df[1]
    df["区间占比"] = df["区间人数"] / df["区间人数"].sum()
    df["区间坏客户率"] = df[1] / df["区间人数"]
    df["累计坏客户占比"] = df[1].cumsum() / df[1].sum()
    df["累计好客户占比"] = df[0].cumsum() / df[0].sum()
    df["好坏区分程度(ks)"] = df["累计坏客户占比"] - df["累计好客户占比"]
    df.reset_index(inplace=True)
    df.rename(columns={"score": "评分区间"}, inplace=True)
    del df[0], df[1]
    df.to_excel(table, sheet_name="13.样本风险评分分布", index=False, startrow=3)
    # Test set (same metrics).
    df = score_test[score_test[y] != grey]
    df.loc[df.score < score_range[0], "score"] = score_range[0]
    df.loc[df.score >= score_range[1], "score"] = score_range[1] - 1
    df["score"] = pd.cut(
        df.score, bins=range(score_range[0], score_range[1] + 1, tick), right=False
    )
    df = df.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    df = df.fillna(0)
    df["区间人数"] = df[0] + df[1]
    df["区间占比"] = df["区间人数"] / df["区间人数"].sum()
    df["区间坏客户率"] = df[1] / df["区间人数"]
    df["累计坏客户占比"] = df[1].cumsum() / df[1].sum()
    df["累计好客户占比"] = df[0].cumsum() / df[0].sum()
    df["好坏区分程度(ks)"] = df["累计坏客户占比"] - df["累计好客户占比"]
    df.reset_index(inplace=True)
    df.rename(columns={"score": "评分区间"}, inplace=True)
    del df[0], df[1]
    df.to_excel(table, sheet_name="13.样本风险评分分布", index=False, startrow=20)
    # Equal-frequency bands
    title1 = pd.DataFrame(columns=["1、等频分布"])
    title1.to_excel(table, sheet_name="13.样本风险评分分布", index=False, startrow=54)
    title2.to_excel(table, sheet_name="13.样本风险评分分布", index=False, startrow=55)
    # Combined train + test: percentile breaks computed on the pooled sample.
    score_total = pd.concat([score_train,score_test],axis = 0,sort = False)
    df = score_total[score_total[y] != grey]
    df.sort_values(by="score", ascending=True, inplace=True)
    df.loc[df.score < score_range[0], "score"] = score_range[0]
    df.loc[df.score >= score_range[1], "score"] = score_range[1] - 1
    percent_list = list(range(0, 100 + percent, percent))
    breaks = np.percentile(df.score.values, percent_list)
    # Deduplicate, truncate to int, and pin the endpoints to the score range.
    breaks = [int(i) for i in list(set(breaks))]
    breaks = sorted(breaks)
    breaks[-1] = score_range[1]
    breaks[0] = score_range[0]
    df["score"] = pd.cut(df.score, bins=breaks, right=False)
    df = df.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    df = df.fillna(0)
    df["区间人数"] = df[0] + df[1]
    df["区间占比"] = df["区间人数"] / df["区间人数"].sum()
    df["区间坏客户率"] = df[1] / df["区间人数"]
    df["累计坏客户占比"] = df[1].cumsum() / df[1].sum()
    df["累计好客户占比"] = df[0].cumsum() / df[0].sum()
    df["好坏区分程度(ks)"] = df["累计坏客户占比"] - df["累计好客户占比"]
    df.reset_index(inplace=True)
    df.rename(columns={"score": "评分区间"}, inplace=True)
    del df[0], df[1]
    df.to_excel(table, sheet_name="13.样本风险评分分布", index=False, startrow=56, startcol=11)
    # Training set, cut with the pooled-sample breaks.
    dt = score_train[score_train[y] != grey]
    dt.sort_values(by="score", ascending=True, inplace=True)
    dt.loc[dt.score < score_range[0], "score"] = score_range[0]
    dt.loc[dt.score >= score_range[1], "score"] = score_range[1] - 1
    dt["score"] = pd.cut(dt.score, bins=breaks, right=False)
    dt = dt.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    dt = dt.fillna(0)
    dt["区间人数"] = dt[0] + dt[1]
    dt["区间占比"] = dt["区间人数"] / dt["区间人数"].sum()
    dt["区间坏客户率"] = dt[1] / dt["区间人数"]
    dt["累计坏客户占比"] = dt[1].cumsum() / dt[1].sum()
    dt["累计好客户占比"] = dt[0].cumsum() / dt[0].sum()
    dt["好坏区分程度(ks)"] = dt["累计坏客户占比"] - dt["累计好客户占比"]
    dt.reset_index(inplace=True)
    dt.rename(columns={"score": "评分区间"}, inplace=True)
    del dt[0], dt[1]
    dt.to_excel(table, sheet_name="13.样本风险评分分布", index=False, startrow=56)
    # Test set, cut with the pooled-sample breaks.
    dt = score_test[score_test[y] != grey]
    dt.sort_values(by="score", ascending=True, inplace=True)
    dt.loc[dt.score < score_range[0], "score"] = score_range[0]
    dt.loc[dt.score >= score_range[1], "score"] = score_range[1] - 1
    dt["score"] = pd.cut(dt.score, bins=breaks, right=False)
    dt = dt.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    dt = dt.fillna(0)
    dt["区间人数"] = dt[0] + dt[1]
    dt["区间占比"] = dt["区间人数"] / dt["区间人数"].sum()
    dt["区间坏客户率"] = dt[1] / dt["区间人数"]
    dt["累计坏客户占比"] = dt[1].cumsum() / dt[1].sum()
    dt["累计好客户占比"] = dt[0].cumsum() / dt[0].sum()
    dt["好坏区分程度(ks)"] = dt["累计坏客户占比"] - dt["累计好客户占比"]
    dt.reset_index(inplace=True)
    dt.rename(columns={"score": "评分区间"}, inplace=True)
    del dt[0], dt[1]
    dt.to_excel(table, sheet_name="13.样本风险评分分布", index=False, startrow=78)

    # OOT (validation) sample, if one exists; NameError on score_oot means
    # "no OOT" and the section is skipped.
    try:
        # Equal-width bands
        title4.to_excel(table, sheet_name="13.样本风险评分分布", index=False, startrow=37)
        df = score_oot[score_oot[y] != grey]
        df.loc[df.score < score_range[0], "score"] = score_range[0]
        df.loc[df.score >= score_range[1], "score"] = score_range[1] - 1
        df["score"] = pd.cut(
            df.score, bins=range(score_range[0], score_range[1] + 1, tick), right=False
        )
        df = df.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
        df = df.fillna(0)
        df["区间人数"] = df[0] + df[1]
        df["区间占比"] = df["区间人数"] / df["区间人数"].sum()
        df["区间坏客户率"] = df[1] / df["区间人数"]
        df["累计坏客户占比"] = df[1].cumsum() / df[1].sum()
        df["累计好客户占比"] = df[0].cumsum() / df[0].sum()
        df["好坏区分程度(ks)"] = df["累计坏客户占比"] - df["累计好客户占比"]
        df.reset_index(inplace=True)
        df.rename(columns={"score": "评分区间"}, inplace=True)
        del df[0], df[1]
        df.to_excel(table, sheet_name="13.样本风险评分分布", index=False, startrow=38)
        # Equal-frequency bands (breaks from the pooled train+test sample above)
        title4.to_excel(table, sheet_name="13.样本风险评分分布", index=False, startrow=104)
        dt = score_oot[score_oot[y] != grey]
        dt.sort_values(by="score", ascending=True, inplace=True)
        dt.loc[dt.score < score_range[0], "score"] = score_range[0]
        dt.loc[dt.score >= score_range[1], "score"] = score_range[1] - 1
        dt["score"] = pd.cut(dt.score, bins=breaks, right=False)
        dt = dt.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
        dt = dt.fillna(0)
        dt["区间人数"] = dt[0] + dt[1]
        dt["区间占比"] = dt["区间人数"] / dt["区间人数"].sum()
        dt["区间坏客户率"] = dt[1] / dt["区间人数"]
        dt["累计坏客户占比"] = dt[1].cumsum() / dt[1].sum()
        dt["累计好客户占比"] = dt[0].cumsum() / dt[0].sum()
        dt["好坏区分程度(ks)"] = dt["累计坏客户占比"] - dt["累计好客户占比"]
        dt.reset_index(inplace=True)
        dt.rename(columns={"score": "评分区间"}, inplace=True)
        del dt[0], dt[1]
        dt.to_excel(table, sheet_name="13.样本风险评分分布", index=False, startrow=105)
    except:
        pass
    # ----------------------------------------------------------------------------------------------------------------
    # 14. Score decision table: approval-rate / default-rate trade-off per
    # score cutoff (sorted high score -> low score), grey samples included.
    head.to_excel(table, sheet_name="14.评分决策表", index=False)
    title1 = pd.DataFrame(columns=["1、等高"])
    title2 = pd.DataFrame(columns=["2、等频"])
    title3 = pd.DataFrame(columns=["评分决策表"])
    # Equal-width bands
    title1.to_excel(table, sheet_name="14.评分决策表", index=False, startrow=1)
    title3.to_excel(table, sheet_name="14.评分决策表", index=False, startrow=2)
    score_total = pd.concat([score_train, score_test], axis=0, sort=False)
    score_total.loc[score_total.score < score_range[0], "score"] = score_range[0]
    score_total.loc[score_total.score >= score_range[1], "score"] = score_range[1] - 1
    score_total["score"] = pd.cut(
        score_total.score,
        bins=range(score_range[0], score_range[1] + 1, tick),
        right=False,
    )
    score_total = (
        score_total.groupby(by=["score", "flagy"])
        .size()
        .unstack(level=[1], fill_value=0)
    )
    score_total = score_total.fillna(0)
    # Descending score order: cumulative sums then model "approve above cutoff".
    score_total.sort_index(ascending=False, inplace=True)
    score_total["好"] = score_total[0]
    try:
        # `grey` is the flag value for indeterminate samples; column may be absent.
        score_total["灰"] = score_total[grey]
    except:
        print("Warning: No Grey sample!")
    score_total["坏"] = score_total[1]
    try:
        score_total["总"] = score_total[0] + score_total[1] + score_total[grey]
    except:
        score_total["总"] = score_total[0] + score_total[1]
    score_total["坏累计"] = score_total[1].cumsum()
    score_total["总累计"] = score_total["总"].cumsum()
    score_total["通过率"] = score_total["总累计"] / score_total["总"].sum()
    score_total["每段违约率"] = score_total["坏"] / score_total["总"]
    score_total["平均违约率"] = score_total["坏"].sum() / score_total["总"].sum()
    score_total["通过违约率"] = score_total["坏累计"] / score_total["总累计"]
    # Default rate among rejected (below-cutoff) applicants.
    score_total["拒绝违约率"] = (score_total["坏"].sum() - score_total["坏累计"]) / (
        score_total["总"].sum() - score_total["总累计"]
    )
    score_total["违约率下降"] = 1 - score_total["通过违约率"] / score_total["平均违约率"]
    score_total.reset_index(inplace=True)
    score_total.rename(columns={"score": "评分区间"}, inplace=True)
    del score_total[1], score_total[0]
    score_total.to_excel(table, sheet_name="14.评分决策表", index=False, startrow=3)
    # 等频
    title2.to_excel(table, sheet_name="14.评分决策表", index=False, startrow=25)
    title3.to_excel(table, sheet_name="14.评分决策表", index=False, startrow=26)
    score_total = pd.concat([score_train, score_test], axis=0, sort=False)
    score_total.loc[score_total.score < score_range[0], "score"] = score_range[0]
    score_total.loc[score_total.score >= score_range[1], "score"] = score_range[1]
    percent_list = list(range(0, 100 + percent, percent))
    breaks = np.percentile(score_total.score.values, percent_list)
    breaks = [int(i) for i in list(set(breaks))]
    breaks = sorted(breaks)
    breaks[-1] = 1000
    breaks[0] = 300
    score_total["score"] = pd.cut(score_total.score, bins=breaks, right=False)
    score_total = (
        score_total.groupby(by=["score", "flagy"])
        .size()
        .unstack(level=[1], fill_value=0)
    )
    score_total = score_total.fillna(0)
    score_total.sort_index(ascending=False, inplace=True)
    score_total["好"] = score_total[0]
    try:
        score_total["灰"] = score_total[grey]
    except:
        print("Warning: No Grey sample!")
    score_total["坏"] = score_total[1]
    try:
        score_total["总"] = score_total[0] + score_total[1] + score_total[grey]
    except:
        score_total["总"] = score_total[0] + score_total[1]
    score_total["坏累计"] = score_total[1].cumsum()
    score_total["总累计"] = score_total["总"].cumsum()
    score_total["通过率"] = score_total["总累计"] / score_total["总"].sum()
    score_total["每段违约率"] = score_total["坏"] / score_total["总"]
    score_total["平均违约率"] = score_total["坏"].sum() / score_total["总"].sum()
    score_total["通过违约率"] = score_total["坏累计"] / score_total["总累计"]
    score_total["拒绝违约率"] = (score_total["坏"].sum() - score_total["坏累计"]) / (
        score_total["总"].sum() - score_total["总累计"]
    )
    score_total["违约率下降"] = 1 - score_total["通过违约率"] / score_total["平均违约率"]
    score_total.reset_index(inplace=True)
    score_total.rename(columns={"score": "评分区间"}, inplace=True)
    del score_total[1], score_total[0]
    score_total.to_excel(table, sheet_name="14.评分决策表", index=False, startrow=27)
    table.save()
    return 1
    # over