# -*- coding: utf-8 -*-
"""
Created on Wed Jul  7 14:47:51 2021

"""
import pandas as pd
import numpy as np
import re
import statsmodels.stats.outliers_influence as oi
import statsmodels.api as sm
import scorecardpy as sc

# Regex filter: given a pattern and a list of column names, return the matches.
def reg(rex,col_list):
    """Return the members of `col_list` whose start matches regex `rex`.

    Uses re.match, so the pattern is anchored at the beginning of each name.
    """
    # the redundant function-level `import re` was dropped (re is imported at
    # module level); `!= None` replaced with the idiomatic `is not None`
    return [i for i in list(col_list) if re.match(rex, i) is not None]

# Monthly volume and bad-rate distribution.
def month_distr(df,x='fst_loan_dt',y='flagy'):
    """Tabulate sample counts and bad rates by month of column `x`.

    Parameters
    ----------
    df : DataFrame with a date-like column `x` and a 0/1 target column `y`.
    x : name of the date column (default 'fst_loan_dt').
    y : name of the binary target column (default 'flagy').

    Returns a DataFrame indexed by month with one column per target value,
    plus total count, count share, per-month bad rate and the overall
    average bad rate.
    """
    mon=pd.DataFrame(df[y])
    mon.columns=['flagy']
    # keep the original row labels so pivot_table has something to count
    mon["index"]=mon.index
    # truncate dates to month precision; NOTE(review): astype('M8[M]') is
    # rejected by pandas >= 2.0 (non-nanosecond datetime64 unit) — confirm
    # the pandas version this runs under
    mon["mon"]=df[x].astype(str).astype('M8[M]')
    # rows: month, columns: target value (0/1), cells: row counts
    mon_distr=mon.pivot_table('index',index='mon',columns='flagy',aggfunc='count',fill_value=0)
    mon_distr['count']=np.sum(mon_distr,axis=1)
    mon_distr['count_dis']=mon_distr['count']/sum(mon_distr['count'])
    # column 1 holds the bad (target == 1) counts
    mon_distr['bad_rate']=mon_distr[1]/mon_distr['count'] 
    mon_distr['badrate_avg']=sum(mon_distr[1])/sum(mon_distr['count'])
    return mon_distr

# Bad-rate share of the target column.
def rate(df,y='flagy'):
    """Frequency table of `y` (NaN included) with total count and ratios."""
    tab = df[y].value_counts(dropna=False).reset_index()
    # second column is the count, regardless of pandas version naming
    total = tab.iloc[:, 1].sum()
    tab['总人数'] = total
    tab['比率'] = tab.iloc[:, 1] / total
    tab.columns = ['flagy', '人数', '总人数', '比率']
    return tab
    
# Per-product hit (match) rate of the flag_* columns.
def flag_hit(df):
    """Summarise match rates for every column whose name starts with 'flag_'.

    Each flag column is expected to hold 0 (no match) / 1 (match).  Returns
    a DataFrame indexed by flag column with value counts, totals and a
    formatted match-rate string.
    """
    flag_list = [i for i in df.columns if re.match('flag_', i) is not None]
    df_flag = df[flag_list]
    # the top-level pd.value_counts is deprecated; apply the Series method
    # instead — identical result, one row per flag column after .T
    table_hit = df_flag.apply(lambda s: s.value_counts()).T.fillna(0)
    # robustness: a column that never takes value 0 (or never takes 1)
    # previously raised KeyError below
    for v in (0, 1):
        if v not in table_hit.columns:
            table_hit[v] = 0
    table_hit['数据量'] = len(df_flag)
    table_hit['有效数量'] = table_hit[0] + table_hit[1]
    table_hit['匹配数据'] = table_hit[1]
    table_hit['匹配率'] = ['{:.2%}'.format(i) for i in list(table_hit[1] / table_hit['有效数量'])]
    return table_hit
    
# Categorical-to-numeric transformation: encode object columns by bad-rate rank.
def cate_var_transform_modif(X,Y):
    """Encode every object-dtype column of X as an integer bad-rate rank.

    For each categorical column, the distinct non-null levels are ordered by
    their bad rate (share of Y == 1) and mapped to the integers
    len(levels)..1, so the level with the highest bad rate gets code 1.
    NaN inputs stay NaN in the output.

    Parameters
    ----------
    X : DataFrame mixing object (categorical) and numeric columns.
    Y : binary target; assumed positionally aligned with X — TODO confirm.

    Returns
    -------
    (X_transformed, transform_rule, rule)
        X_transformed : numeric columns unchanged + encoded object columns.
        transform_rule : the rule table of the LAST object column only
            (the loop variable leaks out; kept for backward compatibility).
        rule : dict mapping column name -> its full rule table.
    """
    ## split columns by dtype
    d_type = X.dtypes
    object_var = X.iloc[:, np.where(d_type == "object")[0]]
    num_var = X.iloc[:, np.where(d_type != "object")[0]]
    
    # object_transfer_rule records the numeric mapping rule of each categorical variable
    object_transfer_rule = list(np.zeros([len(object_var.columns)])) 
    
    # object_transform holds the values after numeric encoding
    object_transform = pd.DataFrame(np.zeros(object_var.shape),
                                    columns=object_var.columns) 
    rule={}
    for i in range(0,len(object_var.columns)):
        print(i)
        temp_var = object_var.iloc[:, i]
        
        ## distinct levels of the column, excluding nulls
        unique_value=np.unique(temp_var.iloc[np.where(~temp_var.isna() )[0]])
    
        transform_rule=pd.concat([pd.DataFrame(unique_value,columns=['raw_data']),
                                       pd.DataFrame(np.zeros([len(unique_value),5]),
                                                    columns=['transform_data','bad_rate','bad_num','all_num','num_distr'])],axis=1) 
        for j in range(0,len(unique_value)):
            bad_num=len(np.where( (Y == 1) & (temp_var == unique_value[j]))[0])
            all_num=len(np.where(temp_var == unique_value[j])[0])
            
            # bad probability of this level
            if all_num == 0:# division-by-zero guard (unreachable: levels come from the data itself)
                all_num=0.5  
            transform_rule.iloc[j,2] = 1.0000000*bad_num/all_num
            transform_rule.iloc[j,3] = bad_num
            transform_rule.iloc[j,4] = all_num
            transform_rule.iloc[j,5] = transform_rule.iloc[j,4]/len(X)
            
        
        # sort by bad rate and assign descending integer codes len(levels)..1
        transform_rule = transform_rule.sort_values(by='bad_rate')
        transform_rule.iloc[:,1]=list(range(len(unique_value),0,-1))
        transform_rule['varname']=object_var.columns[i]
         
        # save the mapping rule
        object_transfer_rule[i] = transform_rule
        rule[object_var.columns[i]]=object_transfer_rule[i]
        # apply the mapping to this column
        for k in range(0,len(unique_value)):
            transfer_value = transform_rule.iloc[np.where(transform_rule.iloc[:,0] == unique_value[k])[0],1]
            object_transform.iloc[np.where(temp_var == unique_value[k])[0],i] = float(transfer_value)
        # cells never touched above were NaN in the input; codes are >= 1, so a
        # remaining 0 marks a missing value
        object_transform.iloc[np.where(object_transform.iloc[:,i] == 0)[0],i] = np.nan 
    
    X_transformed = pd.concat([num_var,object_transform],axis = 1) 
    return (X_transformed,transform_rule,rule)
    
# Screen columns by missing rate and by value concentration.
def missing_identify_select(df,y='flagy',missing_trd=0.9,identify_trd=0.95,identify_num=1):
    """Drop columns that are mostly missing or dominated by a single value.

    Parameters
    ----------
    df : input DataFrame (may include the target column `y`).
    y : target column name; exempt from the concentration screen.
    missing_trd : drop a column when its missing rate exceeds this.
    identify_trd : drop a column when one value's share exceeds this.
    identify_num : drop a column with at most this many distinct values.

    Returns (kept_columns, drop_log) where drop_log lists each dropped
    column with the reason.
    """
    n_rows = len(df)

    # --- missing-rate screen ---
    miss_rate = df.isna().sum() / n_rows
    col_drop_missing = df.columns[miss_rate > missing_trd].tolist()
    dropcol_missing = pd.DataFrame(col_drop_missing, columns=['drop_col'])
    dropcol_missing['DropReason'] = 'missingrate>%s' %missing_trd
    dropcol_missing['Count'] = len(col_drop_missing)
    col_rest_missing = list(set(df.columns) - set(col_drop_missing) - set([y]))

    # --- concentration screen (target column excluded) ---
    survivors = df[col_rest_missing]
    # columns with at most `identify_num` distinct non-null values
    few_values = survivors.nunique() <= identify_num
    col_drop_identify1 = [c for c, hit in zip(col_rest_missing, few_values) if hit]
    # columns where one single value dominates beyond the threshold
    top_share = survivors.apply(lambda s: s.value_counts().max() / s.size)
    col_drop_identify2 = [c for c, hit in zip(col_rest_missing, top_share > identify_trd) if hit]

    col_drop_identify = list(set(col_drop_identify1 + col_drop_identify2))

    dropcol_identify = pd.DataFrame(col_drop_identify, columns=['drop_col'])
    dropcol_identify['DropReason'] = 'identifyrate>%s or identify_num<=%s' %(identify_trd,identify_num)
    dropcol_identify['Count'] = len(col_drop_identify)

    col_rest = list(set(df.columns) - set(col_drop_missing) - set(col_drop_identify))

    df_drop_miss_iden = pd.concat([dropcol_missing, dropcol_identify], axis=0)

    return col_rest, df_drop_miss_iden

# Information-value filter.
def feature_filter_iv(train,y='flagy',seuil=0.02):    
    """Drop variables whose information value is at or below `seuil`.

    Returns (iv_table, kept_variables, drop_log).
    """
    train_woe = train.copy()
    iv_info = sc.iv(train_woe,y = y)

    # variables that do not clear the IV threshold
    drop_col_for_iv = iv_info[iv_info.info_value <= seuil].variable.tolist()
    print("iv剔除变量：{}".format(drop_col_for_iv))
    print(train_woe.shape)

    df_dropcol_iv = pd.DataFrame(drop_col_for_iv, columns=['drop_col'])
    df_dropcol_iv['DropReason'] = 'iv<=%s'%seuil
    df_dropcol_iv['Count'] = len(drop_col_for_iv)

    col_rest_iv = list(set(iv_info['variable']) - set(drop_col_for_iv))

    return iv_info,col_rest_iv,df_dropcol_iv

# LASSO (+ clustering) based selection
def feature_selector_als(train_woe,y='flagy'):
    """L1-regularised logistic-regression feature screen.

    Keeps variables with strictly positive Lasso coefficients (WOE-coded
    inputs are expected, so valid coefficients are positive) plus the target
    column.  Also plots the largest/smallest coefficients.

    Returns (kept_columns, drop_log).
    """
    train_woe = train_woe.copy()
    
    print("#####LASSO 筛选....#####")
    # L1-penalised logistic regression (lasso)
    from sklearn.linear_model import LogisticRegression
    reg_x = train_woe.drop([y],axis = 1)
    reg_y = train_woe[y]
    logis = LogisticRegression(
       penalty = 'l1'
        ,C = 0.1
        ,solver ='liblinear').fit(X =reg_x ,y = reg_y)
    # coefficient per feature
    coef = pd.Series(logis.coef_[0], index = reg_x.columns)   
    print("Lasso picked " + str(sum(coef > 0)) + 
          " variables and eliminated the other " +  
          str(sum(coef <= 0)) + " variables")   
    print("Lasso变量个数:")
    print(np.sum(logis.coef_ > 0))
    # plot the 10 largest and 10 smallest coefficients
    # BUGFIX: was `import matplotlib as plt`, which leaves `plt.pyplot`
    # unresolved (the pyplot submodule is never imported)
    import matplotlib.pyplot as plt
    imp_coef = pd.concat([coef.sort_values().head(10),
                         coef.sort_values().tail(10)])
    plt.rcParams['figure.figsize'] = (8.0, 10.0)
    imp_coef.plot(kind = "barh")
    plt.title("Coefficients in the Lasso Model")
    # surviving variables: positive coefficients plus the target column
    # BUGFIX: the target was hard-coded as ['flagy'] instead of the y parameter
    colrest_ls = list(coef.index[coef.values > 0]) + [y]
    print("lasso变量：{}".format(colrest_ls))
    
    # record the variables removed by lasso
    drop_col_for_lasso = list(set(train_woe.columns.tolist())-set(colrest_ls))
    
    df_dropcol_lasso = pd.DataFrame(drop_col_for_lasso,columns = ['drop_col'])
    df_dropcol_lasso['DropReason'] = 'lasso'
    df_dropcol_lasso['Count'] = len(drop_col_for_lasso)

    return colrest_ls,df_dropcol_lasso


def varclus(train_woe,y='flagy'):
    """Variable clustering (VarClusHi) merged with per-variable IV.

    Adds column 'rw': the within-cluster rank of IV (1 = highest IV in its
    cluster), to help pick one representative variable per cluster.
    """
    from  varclushi import VarClusHi
    # information value of each variable
    iv_info = sc.iv(train_woe, y=y)
    # cluster the variables
    vc = VarClusHi(train_woe, maxeigval2=1, maxclus=None).varclus()
    # join cluster membership with IV
    d = pd.merge(left=vc.rsquare, right=iv_info, left_on='Variable', right_on='variable')
    # rank IV within each cluster, highest IV first
    d['rw'] = d.groupby(['Cluster'])['info_value'].rank(method='max', ascending=False, na_option='keep', pct=False, axis=0)
    return d

# Correlation filter 1
def var_corr(df,seuil=0.8):
    """Iteratively drop the variable correlated (|r| > seuil) with the most others.

    Repeats until no variable is highly correlated with any other variable.
    Returns (kept_columns, drop_log).
    """
    # boolean matrix of |correlation| > threshold, computed ONCE: dropping a
    # column does not change the correlations among the remaining columns, so
    # the per-iteration recomputation of df.corr() was wasted work.
    # .abs().gt() also replaces the deprecated DataFrame.applymap.
    high = df.corr().abs().gt(seuil)
    drop_col_corr = []
    while True:
        # per column: how many |r| > seuil (the diagonal contributes 1)
        cnt = high.sum().sort_values(ascending=False)
        offenders = cnt[cnt > 1]
        if offenders.empty:
            break
        worst = offenders.index[0]
        drop_col_corr.append(worst)
        print(drop_col_corr)
        high = high.drop(index=worst, columns=worst)
    col_rest_corr = list(set(df.columns) - set(drop_col_corr))

    df_drop_corr = pd.DataFrame(drop_col_corr,columns = ['drop_col'])
    df_drop_corr['DropReason'] = '相关性>%s'%seuil
    df_drop_corr['Count'] = len(drop_col_corr)
    print("相关性剔除变量：{}".format(drop_col_corr))
    
    return col_rest_corr,df_drop_corr

# Correlation filter 2
def var_corr2(df,y='flagy',seuil=0.8):
    """For each highly correlated pair (|r| > seuil), drop the higher-IV member.

    BUGFIX: the original called the undefined name `iv(df, y)` (NameError);
    the IV table comes from scorecardpy's sc.iv.
    Note: the code deliberately drops the *higher*-IV variable of each pair,
    matching the original comment and behaviour.
    Returns (kept_columns, drop_log).
    """
    df_iv = sc.iv(df, y)
    # NOTE(review): pairs involving non-feature columns (e.g. the target)
    # would raise KeyError here, as in the original — confirm df contents
    dict_iv = df_iv.set_index('variable')['info_value'].to_dict()

    corr = df.corr().abs()
    high_corr = np.where(corr > seuil)
    # upper-triangle pairs only (row < col), excluding the diagonal;
    # loop indices renamed so the parameter `y` is no longer shadowed
    high_var = [(corr.columns[r], corr.columns[c]) for r, c in zip(*high_corr) if r != c and r < c]
    drop_col_corr = []
    for first, second in high_var:
        if dict_iv[first] < dict_iv[second]:
            drop_col_corr.append(second)
        else:
            drop_col_corr.append(first)

    drop_col_corr = list(set(drop_col_corr))

    col_rest_corr = list(set(df.columns) - set(drop_col_corr))

    df_drop_corr = pd.DataFrame(drop_col_corr,columns = ['drop_col'])
    df_drop_corr['DropReason'] = '相关性>%s'%seuil
    df_drop_corr['Count'] = len(drop_col_corr)
    print("相关性剔除变量：{}".format(drop_col_corr))
    
    return col_rest_corr,df_drop_corr

# Stepwise (forward-backward) regression
def stepwise_selection(X, y, 
                       initial_list = [], 
                       threshold_in = 0.01, 
                       threshold_out = 0.05, 
                       verbose = True):
    """ Perform a forward-backward feature selection 
    based on p-values from a binomial GLM (statsmodels sm.GLM, not OLS).
    Arguments:
        X - pandas.DataFrame with candidate features
        y - list-like with the target
        initial_list - list of features to start with (column names of X)
        threshold_in - include a feature if its p-value < threshold_in
        threshold_out - exclude a feature if its p-value > threshold_out
        verbose - whether to print the sequence of inclusions and exclusions
    Returns: list of selected features 
    Always set threshold_in < threshold_out to avoid infinite looping.
    See https://en.wikipedia.org/wiki/Stepwise_regression for the details
    NOTE: the mutable default `initial_list=[]` is harmless here — it is
    copied into `included` and never mutated.
    """
    import statsmodels.api as sm
    included = list(initial_list)
    while True:
        changed = False
        # forward step: try each excluded feature and add the one with the
        # smallest p-value, if it clears threshold_in
        excluded = list(set(X.columns)-set(included))
        # NOTE(review): pd.Series(index=excluded) without dtype warns on
        # recent pandas; the values are filled in the loop below
        new_pval = pd.Series(index=excluded)
        #print(new_pval)
        for new_column in excluded:
            model = sm.GLM(y, sm.add_constant(pd.DataFrame(X[included+[new_column]])), family = sm.families.Binomial()).fit()
            #print(model.pvalues)
            new_pval[new_column] = model.pvalues[new_column]# p-value of each candidate when added
        print('############################')
        #print(new_pval)
        best_pval = new_pval.min()# smallest candidate p-value (NaN when excluded is empty)
       # print('new_pval:{}\n '.format(new_pval))
        if best_pval < threshold_in:
            best_feature = list(new_pval.index[list(new_pval == best_pval)])[0]
            print('best_feature:{}\n'.format(best_feature))
            included.append(best_feature)
            changed=True
            if verbose:
                print('Add  {:30} with p-value {:.6}'.format(best_feature, best_pval))
        # backward step: refit on the included set and drop the feature with
        # the largest p-value, if it exceeds threshold_out
        model = sm.GLM(y, sm.add_constant(pd.DataFrame(X[included])), family = sm.families.Binomial()).fit()
        # use all coefs except intercept
        pvalues = model.pvalues.iloc[1:]
        print(pvalues)
        worst_pval = pvalues.max() # null if pvalues is empty
        if worst_pval > threshold_out:
            changed=True
            worst_feature = list(pvalues.index[list(pvalues == worst_pval)])[0]
            included.remove(worst_feature)
            if verbose:
                print('Drop {:30} with p-value {:.6}'.format(worst_feature, worst_pval))
        if not changed:
            break
    return included


# VIF-based variable filter
def vif(train_woe,y='flagy',seuil=3):
    """Compute variance inflation factors and flag variables with VIF > seuil.

    Returns (vif_table, kept_columns, drop_log).
    """
    x_col = [i for i in train_woe.columns if i != y]
    reg_x  =train_woe[x_col]
    # design matrix with intercept
    # BUGFIX: np.float was removed in NumPy 1.24 — use the builtin float
    xs = np.array(sm.add_constant(reg_x), dtype=float)
    xs_name = ["const"] + reg_x.columns.tolist() # variables to compute VIF for
    vif = pd.DataFrame([{"variable":xs_name[i], "vif":oi.variance_inflation_factor(xs, i)} for i in range(len(xs_name))])
    vif = vif.sort_values(by = 'vif',ascending = False)
    print(vif)
    
    # record variables removed because of VIF
    # BUGFIX: the threshold was hard-coded to 3 instead of `seuil`, and the
    # intercept row ('const', not a real feature) could be reported as dropped
    candidates = vif[vif['variable'] != 'const']
    drop_col_for_vif = candidates[candidates['vif'] > seuil]['variable'].tolist()
    if len(drop_col_for_vif) > 0:
        print("VIF>{}剔除变量：{}".format(seuil, drop_col_for_vif))
    
    df_dropcol_vif = pd.DataFrame(drop_col_for_vif,columns = ['drop_col'])
    df_dropcol_vif['DropReason'] = 'vif>%s'%seuil
    df_dropcol_vif['Count'] = len(drop_col_for_vif)    
    
    colrest_vif=list(set(train_woe.columns)-set(drop_col_for_vif))

    return vif,colrest_vif,df_dropcol_vif
    
def lr(train_woe,y='flagy'):
    """Iteratively fit a binomial GLM, dropping negative-coefficient and
    non-significant (p >= 0.05) variables until the model is clean.

    Returns (fitted_model, drop_log).
    BUGFIX: the drop log was built from `drop_col_for_lr`, which is always
    empty when the loop exits; it now uses the accumulated `drop_col`.
    """
    import statsmodels.api as sm
    drop_col = []
    while True:
        drop_col_for_lr = []
        x_train_woe = train_woe.drop(y,axis = 1)
        y_train_woe = train_woe[y]
        model = sm.GLM(y_train_woe, sm.add_constant(x_train_woe), family = sm.families.Binomial()).fit()
        # variables with negative coefficients (invalid sign for WOE inputs)
        print("系数负数的变量：")
        neg_vars = model.params[1:][model.params[1:] < 0].index.tolist()
        print(neg_vars)
        drop_col_for_lr = drop_col_for_lr + neg_vars
        # variables that are not statistically significant
        print("p值大于0.05变量：")
        insig_vars = model.pvalues[1:][model.pvalues[1:] >= 0.05].index.tolist()
        print(insig_vars)
        drop_col_for_lr = drop_col_for_lr + insig_vars
        print("最终删除变量：")
        print(drop_col_for_lr)
        if len(drop_col_for_lr) == 0:
            print(model.summary())
            break 
        else:
            train_woe = train_woe.drop(drop_col_for_lr,axis = 1)
            drop_col = drop_col + drop_col_for_lr
            
    df_dropcol_lr = pd.DataFrame(drop_col,columns = ['drop_col'])
    df_dropcol_lr['DropReason'] = 'lr'
    df_dropcol_lr['Count'] = len(drop_col)

    return model,df_dropcol_lr

# Variable-level PSI
def var_psi(train,test,fin_col,bins_fin):
    """PSI of each WOE-coded variable between the train and test samples.

    Returns (df_psi, df_total_psi): per-WOE-value detail and per-variable
    totals.
    """
    train_woe=sc.woebin_ply(train[fin_col],bins=bins_fin)
    test_woe=sc.woebin_ply(test[fin_col],bins=bins_fin)
    
    PSI = {}
    for col in train_woe.columns:
        train_prop = train_woe[col].value_counts(normalize = True, dropna = False).reset_index()
        train_prop.columns=['value_woe','train_distr']
        test_prop = test_woe[col].value_counts(normalize = True, dropna = False).reset_index()
        test_prop.columns=['value_woe','test_distr']
        
        # inner join: WOE values present in only one sample are dropped
        tab_psi=pd.merge(train_prop,test_prop, on=['value_woe'])
        tab_psi['var_psi']=(tab_psi['train_distr']-tab_psi['test_distr']) * np.log(tab_psi['train_distr']/tab_psi['test_distr'])
        tab_psi['total_psi']=sum(tab_psi['var_psi'])
        tab_psi['varname']=col.replace('_woe','')
        PSI[col]=tab_psi

    # assemble the report ONCE, after the loop — the original rebuilt the
    # whole concat on every iteration, quadratic in the number of variables;
    # assign(varname=k) keeps the '_woe'-suffixed names in the final output,
    # exactly as before
    df_psi=pd.concat([v.assign(varname=k) for k,v in PSI.items()])
    df_psi=df_psi[['varname','value_woe','train_distr','test_distr','var_psi','total_psi']]
    df_total_psi=df_psi[['varname','total_psi']].drop_duplicates().reset_index()

    return df_psi,df_total_psi

import re
def ab(points0=600, odds0=1/19, pdo=50):
    """Solve the scorecard scaling constants.

    score = a - b*log(odds): `pdo` points double the odds, and the score
    equals `points0` at odds `odds0`.
    """
    # b: points per natural-log unit of odds; a: offset anchoring points0
    scale = pdo / np.log(2)
    offset = points0 + scale * np.log(odds0)
    return {'a': offset, 'b': scale}

def scorecard(bins, model, xcolumns, points0=600, odds0=1/19, pdo=50, basepoints_eq0=False):
    # coefficients
    aabb = ab(points0, odds0, pdo)
    a = aabb['a'] 
    b = aabb['b']
    # odds = pred/(1-pred); score = a - b*log(odds)
    
    # bins # if (is.list(bins)) rbindlist(bins)
    if isinstance(bins, dict):
        bins = pd.concat(bins, ignore_index=True)
    xs = [re.sub('_woe$', '', i) for i in xcolumns]
    # coefficients
    # coef_df = pd.Series(model.coef_[0], index=np.array(xs))\
    #   .loc[lambda x: x != 0]#.reset_index(drop=True)
    coef_df = pd.Series(model.params.tolist()[1:], index=np.array(xs))\
      .loc[lambda x: x != 0]#.reset_index(drop=True)      
      
    
    # scorecard
    len_x = len(coef_df)
    # basepoints = a - b*model.intercept_[0]
    basepoints = a - b*model.params[0]
    card = {}
    if basepoints_eq0:
        card['basepoints'] = pd.DataFrame({'variable':"basepoints", 'bin':np.nan, 'points':0}, index=np.arange(1))
        for i in coef_df.index:
            card[i] = bins.loc[bins['variable']==i,['variable', 'bin', 'woe']]\
              .assign(points = lambda x: round(-b*x['woe']*coef_df[i] + basepoints/len_x))\
              [["variable", "bin", "points"]]
    else:
        card['basepoints'] = pd.DataFrame({'variable':"basepoints", 'bin':np.nan, 'points':round(basepoints)}, index=np.arange(1))
        for i in coef_df.index:
            card[i] = bins.loc[bins['variable']==i,['variable', 'bin', 'woe']]\
              .assign(points = lambda x: round(-b*x['woe']*coef_df[i]))\
              [["variable", "bin", "points"]]
    return card

#score PSI
def score_psi(predtrain,ytrain,predtest,ytest,k): 
    """PSI of the score distribution between train and test samples.

    Scores are bucketed into fixed-width bins of width `k` over [300, 1050).
    Returns a DataFrame indexed by score bucket with per-sample counts,
    shares, bad rates, per-bucket PSI and the total PSI.
    """
    bins = list(range(300, 1050, k))
    count1 = _score_distr(predtrain, ytrain, bins, 'train_num', 'train_badnum', 'train_distr', 'train_badrate')
    # 'test_distri' (sic) is kept for backward compatibility with callers
    count2 = _score_distr(predtest, ytest, bins, 'test_num', 'test_badnum', 'test_distri', 'test_badrate')
    
    tab_psi = pd.merge(count1, count2, on=['decile'], how='outer')
    tab_psi['psi'] = (tab_psi['train_distr'] - tab_psi['test_distri']) * np.log(tab_psi['train_distr'] / tab_psi['test_distri'])
    # BUGFIX: zero out non-finite contributions. The original tested
    # np.in1d(psi, [inf, nan, None]), which never matches NaN (NaN != NaN),
    # so buckets present in only one sample poisoned total_psi with NaN.
    tab_psi['psi'] = np.where(np.isfinite(tab_psi['psi']), tab_psi['psi'], 0)
    tab_psi['total_psi'] = tab_psi['psi'].sum()
    tab_psi = tab_psi[['train_num', 'train_distr', 'train_badnum', 'train_badrate', 'test_num',
        'test_distri', 'test_badnum', 'test_badrate', 'psi', 'total_psi']]
    
    return tab_psi

def _score_distr(pred, y, bins, num_col, bad_col, distr_col, badrate_col):
    """Bucket one sample's scores; output columns named via the *_col args."""
    tab = pd.DataFrame(np.stack([pred, y], axis=1), columns=['pred', 'y'])
    tab['decile'] = pd.cut(tab['pred'], bins, right=False).astype(str)
    count = tab.groupby('decile').agg({'pred': 'count', 'y': 'sum'})
    count.columns = [num_col, bad_col]
    count[distr_col] = count[num_col] / count[num_col].sum()
    count[badrate_col] = count[bad_col] / count[bad_col].sum()
    return count

def score_overdue(pred,y,bins_way='same_dist',order='desc',k=10): 
    """Score-band approval / overdue report.

    Parameters
    ----------
    pred : array-like of scores (assumed to lie within [300, 1050) for
        bins_way='same_dist' — confirm with the caller).
    y : array-like target: 0 = good, 1 = bad, 2 = grey (optional class).
    bins_way : 'same_dist' -> fixed-width bins of width k from 300;
        'same_per' -> k percentile-based (deduplicated) cut points.
    order : 'desc' accumulates from high scores down (approve best first),
        'asc' from low scores up.  NOTE: any other value leaves `count`
        undefined and raises NameError at the return.
    k : bin width ('same_dist') or number of percentiles ('same_per').

    Returns a DataFrame indexed by score band with counts, shares, bad
    rates, KS, pass rate and lift columns (the Chinese column names are
    the public output format and are kept as-is).
    """
    table=np.stack([pred,y],axis=1)
    table=pd.DataFrame(table,columns=['pred','y'])
    
    if bins_way=='same_dist':
        bins=list(range(300,1050,k))
        # widen the last edge so the maximum score falls inside the last bin
        bins[-1]=bins[-1]+1
    elif bins_way=='same_per':
        # percentile edges, ceiled to integers and deduplicated
        bins=np.unique(np.ceil(np.percentile(table['pred'], np.linspace(0, 100, k))))
        bins[-1]=bins[-1]+1
    table['decile']=pd.cut(table['pred'],bins,right=False)
    table['decile']=table['decile'].astype(str)
    # strip '.0' from float bin edges so labels read like integers
    table['decile']=[i.replace('.0','') for i in table['decile']]
    
    if order=='desc':        
        # [::-1] reverses the bands so the highest scores come first
        count=table.pivot_table('pred',index='decile',columns='y',aggfunc='count',fill_value=0)[::-1]
        count['区间人数']=np.sum(count,axis=1)
        count.rename(columns={0:'好客数',1:'坏客数',2:'灰客数'},inplace=True)        
        count['区间占比']=count['区间人数']/sum(count['区间人数'])
        count['区间坏客率']=count['坏客数']/count['区间人数']
        count["累计区间占比"]=np.cumsum(count['区间人数'])/sum(count['区间人数'])
        count["累计坏占比"]=np.cumsum(count['坏客数'])/sum(count['坏客数'])
        count["累计好占比"]=np.cumsum(count['好客数'])/sum(count['好客数'])
        count['KS']=np.abs(count["累计好占比"]-count["累计坏占比"])
        count['通过率']=np.cumsum(count['区间人数'])/sum(count['区间人数'])
        count['平均违约率']=np.sum(count['坏客数'])/np.sum(count['区间人数'])
        count['通过违约率']=np.cumsum(count['坏客数'])/np.cumsum(count['区间人数'])
        count['拒绝违约率']=((sum(count['坏客数']))-np.cumsum(count['坏客数']))/(sum(count['区间人数'])-np.cumsum(count['区间人数']))
        count['违约率下降']=1-count['通过违约率']/count['平均违约率']
        count['提升度']=count['拒绝违约率']/count['平均违约率']
        
    if order=='asc':        
        # natural (ascending) band order; pass/lift formulas mirror the
        # desc branch with the approve/reject roles swapped
        count=table.pivot_table('pred',index='decile',columns='y',aggfunc='count',fill_value=0)
        count['区间人数']=np.sum(count,axis=1)
        count.rename(columns={0:'好客数',1:'坏客数',2:'灰客数'},inplace=True)        
        count['区间占比']=count['区间人数']/sum(count['区间人数'])
        count['区间坏客率']=count['坏客数']/count['区间人数']
        count["累计区间占比"]=np.cumsum(count['区间人数'])/sum(count['区间人数'])
        count["累计坏占比"]=np.cumsum(count['坏客数'])/sum(count['坏客数'])
        count["累计好占比"]=np.cumsum(count['好客数'])/sum(count['好客数'])
        count['KS']=np.abs(count["累计好占比"]-count["累计坏占比"])
        count['通过率']=1-np.cumsum(count['区间人数'])/sum(count['区间人数'])
        count['平均违约率']=sum(count['坏客数'])/sum(count['区间人数'])
        count['通过违约率']=np.cumsum(count['坏客数'])/np.cumsum(count['区间人数'])
        count['拒绝违约率']=((sum(count['坏客数']))-np.cumsum(count['坏客数']))/(sum(count['区间人数'])-np.cumsum(count['区间人数']))
        count['违约率下降']=1-count['拒绝违约率']/count['平均违约率']
        count['提升度']=count['通过违约率']/count['平均违约率']
        
    return count 


def df_param_(model,train_woe,fin_col,train,flag_name='flagy'):
    """Model-parameter report: coefficients, p-values, VIF, IV, missing rate.

    Parameters
    ----------
    model : fitted statsmodels model (summary table row 0 is the intercept).
    train_woe : WOE-coded training frame (source of VIF and IV).
    fin_col : final model columns (incl. target) passed to sc.iv.
    train : raw training frame, used for per-variable missing rates.
    flag_name : target column name.
    """
    # coefficient table parsed out of the statsmodels summary
    df_param= model.summary().tables[1].data
    df_param= pd.DataFrame(data=df_param[1:], columns=df_param[0]).iloc[:,0:5]
    df_param.columns=['variable','params','std','Z_value','P_value']

    #### VIF ####
    import statsmodels.stats.outliers_influence as oi
    import statsmodels.api as sm
    x_col = df_param['variable'].tolist()[1:]
    reg_x  =train_woe[x_col]

    # design matrix with intercept
    # BUGFIX: np.float was removed in NumPy 1.24 — use the builtin float
    xs = np.array(sm.add_constant(reg_x), dtype=float)
    xs_name = ["const"] + reg_x.columns.tolist() # variables to compute VIF for
    vif = pd.DataFrame([{"variable":xs_name[i], "vif":oi.variance_inflation_factor(xs, i)} for i in range(len(xs_name))])

    #### IV ####
    model_iv = sc.iv(train_woe[fin_col],y = flag_name)

    # merge everything into one report (merge happens on the '_woe' names,
    # which are stripped afterwards)
    df_param['VIF'] = vif['vif'].tolist()
    df_param =pd.merge(df_param, model_iv,on = 'variable', how = 'left' )
    df_param['variable'] = [i.replace("_woe",'') for i in df_param['variable']]
    df_param['exp']=''
    # missing rate in the raw data (None for the intercept row)
    df_param['missing']=[None]+[train[i].isna().sum()/len(train) for i in list(df_param['variable'][1:])]

    # final column order
    order = ['variable','exp','params','std','Z_value','P_value','VIF','info_value','missing']
    df_param = df_param[order]
    return df_param