# -*- coding: utf-8 -*-
"""
Logistic建模用到的函数

Created on Dec 2021

Author@chang.lu
"""
import pandas as pd
import numpy as np
import os
import scorecardpy as sc
import re
import statsmodels.api as sm
import statsmodels.stats.outliers_influence as oi
import datetime as dtt
from sklearn.linear_model import LogisticRegression
import joblib
from joblib import Parallel, delayed
from matplotlib import pyplot as plt
from sklearn import tree

def flag_hit(dat):
    """Report the hit (match) rate of every ``flag_*`` product column.

    Values 98/99 are treated as "no hit" and recoded to 0 before counting.

    Parameters
    ----------
    dat : pd.DataFrame
        Input data; every column whose name starts with ``flag_`` is
        inspected.  The input frame is NOT modified (the original version
        mutated it in place).

    Returns
    -------
    pd.DataFrame indexed by flag column name with columns
    '匹配数量' (hit count) and '匹配率' (hit rate, formatted percent).
    """
    flag_list = [c for c in dat.columns if re.match('flag_', c) is not None]
    total = len(dat)
    # Work on a copy so the caller's frame stays untouched; plain value
    # replace (the old regex=True flag was meaningless for numeric values).
    flags = dat[flag_list].replace([98, 99], 0)
    # Count value 1 per column; columns with no hit yield 0 instead of
    # raising KeyError like the old `.T[1]` lookup did.
    hit_cnt = (flags == 1).sum()
    hit_rate = hit_cnt / total
    df = pd.DataFrame(index=hit_cnt.index)
    df['匹配数量'] = list(hit_cnt)
    df['匹配率'] = ['{:.2%}'.format(r) for r in hit_rate]
    return df

def badrate_all(df, y='flagy'):
    """Overall label distribution: count, total and share per label value.

    Parameters
    ----------
    df : pd.DataFrame containing the label column.
    y : str, label column name (NaN labels are counted as their own group).

    Returns
    -------
    pd.DataFrame with columns ['flagy', '人数', '总人数', '比率'], one row per
    distinct label value, sorted by frequency (value_counts order).
    """
    # rename('人数') pins the count-column name regardless of the pandas
    # version: since pandas 2.0, value_counts().reset_index() names the
    # count column 'count' instead of reusing the original column name,
    # which silently broke the old positional logic.
    df_temp = df[y].value_counts(dropna=False).rename('人数').reset_index()
    df_temp.columns = ['flagy', '人数']
    df_temp['总人数'] = df_temp['人数'].sum()
    df_temp['比率'] = (df_temp['人数'] / df_temp['总人数']).apply(lambda x: '%.2f' % (x * 100) + '%')
    return df_temp

def var_ks(df_data, col_var, flagy):
    """Compute AUC and KS for each candidate variable against the label.

    Parameters
    ----------
    df_data : pd.DataFrame holding the label and the candidate columns.
    col_var : iterable of column names to evaluate.
    flagy : str, name of the binary label column.

    Returns
    -------
    pd.DataFrame ['var', 'auc', 'ks'] sorted by KS descending.  Variables
    whose metrics cannot be computed (NaNs, missing column, ...) are
    skipped with a message.
    """
    # Imported here: the module-level imports never brought these sklearn
    # names into scope, and the previous bare `except: pass` silently
    # swallowed the resulting NameError, so the function always returned
    # an empty frame.
    from sklearn.metrics import roc_auc_score, roc_curve

    rows = []
    for var in col_var:
        try:
            auc = roc_auc_score(y_true=df_data[flagy], y_score=df_data[var])
            fpr, tpr, _ = roc_curve(y_true=df_data[flagy], y_score=df_data[var])
            rows.append([var, auc, max(abs(tpr - fpr))])
        except (ValueError, KeyError, TypeError) as e:
            # best effort: skip variables that cannot be scored, but say so
            print('var_ks skip {}: {}'.format(var, e))
    return pd.DataFrame(rows, columns=['var', 'auc', 'ks']).sort_values(by='ks', ascending=False)
    
def cate_var_transform(X,Y):
    """Numerically encode categorical (dtype == object) columns by bad rate.

    For every object column, each non-null category is assigned an integer
    code based on its bad rate (share of Y == 1 within the category): after
    sorting ascending by bad rate the codes run n..1, so the category with
    the HIGHEST bad rate gets code 1 and the lowest gets n.

    Parameters
    ----------
    X : pd.DataFrame of features (mix of object and numeric columns).
    Y : pd.Series / array of binary labels aligned with X's rows.

    Returns
    -------
    (X_transformed, object_transfer_rule)
        X_transformed : numeric columns unchanged + encoded object columns
            (cells that were NaN originally become NaN again; codes are floats).
        object_transfer_rule : dict column name -> DataFrame with columns
            ['raw data', 'transform data', 'bad rate'] recording the mapping.
    """
    ## split columns by dtype
    d_type = X.dtypes
    object_var = X.iloc[:, np.where(d_type == "object")[0]]
    num_var = X.iloc[:, np.where(d_type != "object")[0]]
    
    # object_transfer_rule records the numeric mapping of every categorical column
    object_transfer_rule = {}
    
    # object_transform holds the encoded values; 0 acts as the "not yet
    # assigned" sentinel and is turned into NaN at the end of each column
    object_transform = pd.DataFrame(np.zeros(object_var.shape),
                                    columns=object_var.columns) 
    
    for i in range(0,len(object_var.columns)):
        
        temp_var = object_var.iloc[:, i]
        
        ## distinct values, excluding NaN
        unique_value=np.unique(temp_var.iloc[np.where(~temp_var.isna() )[0]])
    
        transform_rule=pd.concat([pd.DataFrame(unique_value,columns=['raw data']),
                                       pd.DataFrame(np.zeros([len(unique_value),2]),
                                                    columns=['transform data','bad rate'])],axis=1) 
        for j in range(0,len(unique_value)):
            bad_num=len(np.where( (Y == 1) & (temp_var == unique_value[j]) )[0])
            all_num=len(np.where(temp_var == unique_value[j])[0])
            
            # bad rate of the category
            if all_num == 0:# defensive guard against division by zero (values come from the data, so this should not trigger)
                all_num=0.5  
            transform_rule.iloc[j,2] = 1.0000000*bad_num/all_num
        
        # sort by bad rate ascending, then assign the ordinal codes n..1
        transform_rule = transform_rule.sort_values(by='bad rate')
        transform_rule.iloc[:,1]=list(range(len(unique_value),0,-1))
         
        # keep the mapping for later reuse (e.g. scoring new data)
        object_transfer_rule.update({object_var.columns[i]: transform_rule})
        # apply the mapping to the column
        for k in range(0,len(unique_value)):
            transfer_value = transform_rule.iloc[np.where(transform_rule.iloc[:,0] == unique_value[k])[0],1]
            object_transform.iloc[np.where(temp_var == unique_value[k])[0],i] = float(transfer_value)
        # cells still holding the 0 sentinel were NaN originally -> restore NaN
        object_transform.iloc[np.where(object_transform.iloc[:,i] == 0)[0],i] = np.nan 
    
    X_transformed = pd.concat([num_var,object_transform],axis = 1) 
    return(X_transformed,object_transfer_rule)

 
def lasso_selection(reg_data,reg_target,alpha_ = 0.001,max_iter_=10000,Lasso_CV = True):
    """Feature selection via an L1 (lasso) fit: keep columns with positive coefficients.

    Parameters
    ----------
    reg_data : pd.DataFrame of candidate features.
    reg_target : array-like target.
    alpha_ : float, lasso penalty used when ``Lasso_CV`` is False.
    max_iter_ : int, solver iteration cap used when ``Lasso_CV`` is False.
    Lasso_CV : bool, choose alpha by cross validation when True.

    Returns
    -------
    (lasso_sel_var, df_dropcol_lasso)
        lasso_sel_var : list of kept column names.
        df_dropcol_lasso : DataFrame logging dropped columns ('_woe'
            suffix stripped) with reason 'lasso'.
    """
    if Lasso_CV:
        from sklearn.linear_model import LassoCV
        model = LassoCV()
        model.fit(reg_data, reg_target)
        # alpha chosen by cross validation
        print(f'交叉验证选择的参数alpha：{model.alpha_}')
        # number of surviving variables
        print(f'筛选出的变量个数：{np.sum(model.coef_ > 0)}')
    else:
        # plain lasso with a fixed alpha
        from sklearn.linear_model import Lasso
        model = Lasso(alpha = alpha_, max_iter = max_iter_)
        model.fit(reg_data, reg_target)
        # first ten coefficients of the fitted model
        print(f'变量系数：{model.coef_[:10]}')
        print(f'筛选出的变量个数：{np.sum(model.coef_ > 0)}')
    keep_mask = model.coef_ > 0
    lasso_sel_var = reg_data.iloc[:, keep_mask].columns.tolist()
    dropped = list(set(reg_data.columns.tolist()) - set(lasso_sel_var))

    df_dropcol_lasso = pd.DataFrame([c.replace("_woe", "") for c in dropped], columns=['drop_col'])
    df_dropcol_lasso['DropReason'] = 'lasso'
    df_dropcol_lasso['Count'] = len(dropped)
    return lasso_sel_var, df_dropcol_lasso

def stepwise_selection(X, y, 
                       initial_list = [], 
                       threshold_in = 0.01, 
                       threshold_out = 0.05, 
                       verbose = True):
    """Forward-backward (stepwise) feature selection on binomial-GLM p-values.

    Each round first tries to ADD the excluded variable with the smallest
    p-value (if below ``threshold_in``), then refits and DROPS the included
    variable with the largest p-value (if above ``threshold_out``), until a
    full round changes nothing.  Keep threshold_in < threshold_out to avoid
    infinite looping.

    Arguments:
        X - pandas.DataFrame with candidate features
        y - list-like with the target
        initial_list - list of features to start with (column names of X)
        threshold_in - include a feature if its p-value < threshold_in
        threshold_out - exclude a feature if its p-value > threshold_out
        verbose - whether to print the sequence of inclusions and exclusions
    Returns:
        (included, df_dropcol_stepwise) - selected features and a log frame
        of the dropped ones ('_woe' suffix stripped, reason 'stepwise').
    """
    candidate_cols = X.columns.tolist()
    included = list(initial_list)
    while True:
        changed = False
        # ---- forward step: probe every excluded variable
        excluded = list(set(X.columns) - set(included))
        new_pval = pd.Series(index=excluded, dtype=float)
        for col in excluded:
            fit = sm.GLM(y, sm.add_constant(pd.DataFrame(X[included + [col]])),
                         family=sm.families.Binomial()).fit()
            new_pval[col] = fit.pvalues[col]
        best_pval = new_pval.min()
        if best_pval < threshold_in:
            best_feature = new_pval.idxmin()
            included.append(best_feature)
            changed = True
            if verbose:
                print('Add  {:30} with p-value {:.6}'.format(best_feature, best_pval))

        # ---- backward step: refit with the current set
        fit = sm.GLM(y, sm.add_constant(pd.DataFrame(X[included])),
                     family=sm.families.Binomial()).fit()
        pvalues = fit.pvalues.iloc[1:]   # skip the intercept
        worst_pval = pvalues.max()       # NaN when nothing is included yet
        if worst_pval > threshold_out:
            changed = True
            worst_feature = pvalues.idxmax()
            included.remove(worst_feature)
            if verbose:
                print('Drop {:30} with p-value {:.6}'.format(worst_feature, worst_pval))
        if not changed:
            break
    dropped = list(set(candidate_cols) - set(included))

    df_dropcol_stepwise = pd.DataFrame([c.replace("_woe", "") for c in dropped], columns=['drop_col'])
    df_dropcol_stepwise['DropReason'] = 'stepwise'
    df_dropcol_stepwise['Count'] = len(dropped)
    return included, df_dropcol_stepwise

# Variable filtering by missing rate and value concentration
def missing_identify_select(df,y='flagy',missing_trd=0.9,identify_trd=0.95,identify_num=1):
    """Drop columns by missing rate and by value concentration.

    A column is dropped when
      * its NaN ratio exceeds ``missing_trd``; or
      * it has at most ``identify_num`` distinct non-null values; or
      * its most frequent value covers more than ``identify_trd`` of the rows.

    The label column ``y`` is exempt from the concentration checks.

    Returns
    -------
    (col_rest, df_drop_miss_iden)
        col_rest : list of surviving column names (order not guaranteed).
        df_drop_miss_iden : DataFrame logging dropped columns and reasons.
    """
    n_rows = len(df)

    # --- missing-rate filter
    miss_drop = df.columns[df.isna().sum() / n_rows > missing_trd].tolist()
    dropcol_missing = pd.DataFrame(miss_drop, columns=['drop_col'])
    dropcol_missing['DropReason'] = 'missingrate>%s' % missing_trd
    dropcol_missing['Count'] = len(miss_drop)
    survivors = list(set(df.columns) - set(miss_drop) - set([y]))

    # --- concentration filter
    # (1) at most `identify_num` distinct non-null values
    iden_drop1 = np.array(survivors)[df[survivors].nunique() <= identify_num].tolist()
    # (2) a single value dominates the column
    top_share = df[survivors].apply(lambda s: s.value_counts().max() / s.size)
    iden_drop2 = np.array(survivors)[top_share > identify_trd].tolist()
    iden_drop = list(set(iden_drop1 + iden_drop2))

    dropcol_identify = pd.DataFrame(iden_drop, columns=['drop_col'])
    dropcol_identify['DropReason'] = 'identifyrate>%s or identify_num<=%s' % (identify_trd, identify_num)
    dropcol_identify['Count'] = len(iden_drop)

    col_rest = list(set(df.columns) - set(miss_drop) - set(iden_drop))
    df_drop_miss_iden = pd.concat([dropcol_missing, dropcol_identify], axis=0)
    return col_rest, df_drop_miss_iden

def nan_iden_filter(df_,missing_limit,identical_limit,kp_var = None):
    """Keep columns whose missing rate and top-value share are within limits.

    Parameters
    ----------
    df_ : pd.DataFrame to filter.
    missing_limit : float, maximum allowed NaN ratio (inclusive).
    identical_limit : float, maximum allowed share of the most frequent
        value (inclusive).
    kp_var : list or None, columns exempt from the checks, re-appended at
        the end of the result.

    Returns
    -------
    pd.DataFrame restricted to the surviving columns (plus kp_var).
    """
    original = df_.copy()
    candidates = df_.drop(columns=kp_var) if kp_var else df_
    # per-column NaN share
    missing_share = candidates.apply(lambda s: s[s.isnull()].size / s.size)
    # per-column share of the most frequent value
    top_share = candidates.apply(lambda s: s.value_counts().max() / s.size)
    keep = [
        col for col in candidates.columns
        if missing_share[col] <= missing_limit and top_share[col] <= identical_limit
    ]
    if kp_var:
        return original[keep + kp_var]
    return original[keep]

def bin_iv_filter(df1,bins_num = 10,y='flagy',iv_limit = 0.02,kp_var = None):
    """Equal-frequency bin every feature, then keep those with IV >= iv_limit.

    Parameters
    ----------
    df1 : pd.DataFrame of numeric features plus the label.
    bins_num : int, number of quantile bins (duplicate edges merged).
    y : str, label column name.
    iv_limit : float, minimum information value to keep a variable.
    kp_var : list or None, columns exempt from the filter and always kept.

    Returns
    -------
    (remain_col, df_dropcol_bin_iv)
        remain_col : surviving column names (kp_var and y included).
        df_dropcol_bin_iv : DataFrame logging the dropped columns.
    """
    work = df1.copy()
    if kp_var:
        work = work.drop(columns=kp_var)
    # quantile binning of every feature column before the IV scan
    binned = work.drop(columns=[y]).apply(
        lambda col: pd.qcut(col, bins_num, labels=None, duplicates='drop'))
    ivlist = iv(pd.concat([binned, work[y]], axis=1), y=y)
    keep_vars = ivlist[ivlist.info_value >= iv_limit].variable.tolist()
    if kp_var:
        remain_col = keep_vars + kp_var + [y]
    else:
        remain_col = keep_vars + [y]

    dropped = list(set(work.columns) - set(remain_col))

    df_dropcol_bin_iv = pd.DataFrame(dropped, columns=['drop_col'])
    df_dropcol_bin_iv['DropReason'] = 'bin_iv<%s' % iv_limit
    df_dropcol_bin_iv['Count'] = len(dropped)

    return remain_col, df_dropcol_bin_iv

def woe_iv_filter(df1,y='flagy',iv_limit = 0.02,kp_var = None):
    """Keep WOE variables whose information value reaches ``iv_limit``.

    Parameters
    ----------
    df1 : pd.DataFrame of WOE-transformed features plus the label.
    y : str, label column name.
    iv_limit : float, minimum information value to keep a variable.
    kp_var : list or None, columns exempt from the filter and always kept.

    Returns
    -------
    (remain_col, df_dropcol_woe_iv)
        remain_col : surviving column names (kp_var and y included).
        df_dropcol_woe_iv : DataFrame logging dropped columns ('_woe'
            suffix stripped).
    """
    df = df1.copy()
    if kp_var:
        df = df.drop(columns = kp_var)
    # BUG FIX: the label used to be hard-coded as 'flagy', silently
    # ignoring the `y` argument for any other label name.
    df_woe_iv = sc.iv(df, y)
    iv_var = df_woe_iv[df_woe_iv.info_value >= iv_limit].variable.tolist()
    if kp_var:
        remain_col = iv_var + kp_var + [y]
    else:
        remain_col = iv_var + [y]

    drop_col_woe_iv = list(set(df.columns) - set(remain_col))

    df_dropcol_woe_iv = pd.DataFrame([i.replace("_woe", "") for i in drop_col_woe_iv],columns = ['drop_col'])
    df_dropcol_woe_iv['DropReason'] = 'woe_iv<%s'%iv_limit
    df_dropcol_woe_iv['Count'] = len(drop_col_woe_iv)

    return remain_col,df_dropcol_woe_iv

def object_var_del(df, num_limit=10):
    """Drop object-dtype columns with more than ``num_limit`` distinct values.

    NaN does not count as a category (value_counts drops it).  Returns a
    new DataFrame; the input is left untouched.
    """
    obj_cols = df.loc[:, df.dtypes == 'object']
    too_many = [c for c in obj_cols.columns if len(obj_cols[c].value_counts()) > num_limit]
    return df.drop(columns=too_many)

def isna(data):
    """Return the Index of columns of ``data`` that contain at least one NaN."""
    na_counts = pd.isna(data).sum()
    return na_counts.index[na_counts.values > 0]

def corr_iv(df1,corr_limit = 0.7,flag = "flagy"):
    """Among highly-correlated variable pairs, drop the one with lower IV.

    Builds the absolute correlation matrix of the feature columns, collects
    every upper-triangle pair with |corr| > ``corr_limit``, and for each
    pair removes the member with the smaller information value (pairs whose
    member was already removed by an earlier pair are skipped).

    Parameters
    ----------
    df1 : pd.DataFrame with features and the label column ``flag``.
    corr_limit : float, absolute-correlation threshold.
    flag : str, label column name.

    Returns
    -------
    (df, df_drop_corr)
        df : copy of df1 with the losing variables removed.
        df_drop_corr : DataFrame logging the dropped columns ('_woe'
            suffix stripped).
    """
    df = df1.copy()
    corr = abs(df.drop(columns=[flag]).corr())
    # every upper-triangle pair above the threshold
    corr_dict={}
    for i in range(corr.shape[0]-1):
        for j in range(corr.shape[0]-i-1):
            if corr.iloc[i,i+j+1] > corr_limit:
                corr_dict[(corr.index[i],corr.columns[i+j+1])] = corr.iloc[i,i+j+1]  
    iv_result = iv(df, flag)
    for name in corr_dict.keys():
        # one of the pair may already have been removed by an earlier pair
        if name[0] in df.columns and name[1] in df.columns:
            # .iloc[0] instead of float(Series): calling float() on a
            # single-element Series is deprecated/removed in recent pandas
            name0_iv = iv_result.loc[iv_result['variable'] == name[0], 'info_value'].iloc[0]
            name1_iv = iv_result.loc[iv_result['variable'] == name[1], 'info_value'].iloc[0]
            if name0_iv > name1_iv:
                del df[name[1]]
            else:
                del df[name[0]]
    drop_col_corr = list(set(df1.drop(columns=[flag]).columns)-set(df.columns))
    df_drop_corr = pd.DataFrame([i.replace("_woe", "") for i in drop_col_corr],columns = ['drop_col'])
    df_drop_corr['DropReason'] = '相关性>%s'%corr_limit
    df_drop_corr['Count'] = len(drop_col_corr)
    print("相关性剔除变量：{}".format(drop_col_corr))

    return df,df_drop_corr


def vif_select(df1,vif_limit = 3,flag = "flagy"):
    """Iteratively drop the variable with the highest VIF until all are <= vif_limit.

    Parameters
    ----------
    df1 : pd.DataFrame of features plus the label column ``flag``.
    vif_limit : float, maximum tolerated variance inflation factor.
    flag : str, label column name (excluded from the VIF computation).

    Returns
    -------
    (vif, train_woe_copy, df_drop_vif)
        vif : DataFrame ['variable', 'vif'] of the surviving variables.
        train_woe_copy : copy of df1 with the dropped columns removed.
        df_drop_vif : DataFrame logging the dropped columns ('_woe'
            suffix stripped).
    """
    import statsmodels.stats.outliers_influence as oi
    import statsmodels.api as sm

    def _vif_frame(frame):
        # VIF of every feature column of `frame`, with an added constant.
        # dtype=float: np.float was removed in NumPy 1.24.
        xs = np.array(sm.add_constant(frame.drop(columns=[flag])), dtype=float)
        names = ["const"] + frame.drop(columns=[flag]).columns.to_list()
        out = pd.DataFrame([{
            "variable": names[i],
            "vif": oi.variance_inflation_factor(xs, i)
        } for i in range(len(names))])
        return out[out.variable != 'const']

    train_woe_copy = df1.copy()
    vif = _vif_frame(train_woe_copy)
    while 1:
        vif.sort_values(by='vif', ascending=False, inplace=True)
        if vif.iloc[0]['vif'] > vif_limit:
            vif.reset_index(drop=1, inplace=True)
            print('drop: ' + vif.iloc[0]['variable'] + ' vif: ' +
                  str(vif.iloc[0]['vif']))
            del train_woe_copy[vif.iloc[0]['variable']]
            # BUG FIX: recompute the names from the design matrix's actual
            # column order -- the old code labelled the new VIFs with the
            # previous VIF-sorted order, attaching values to the wrong
            # variables once the two orders diverged.
            vif = _vif_frame(train_woe_copy)
        else:
            break
    drop_col_vif = list(set(df1.drop(columns=[flag]).columns)-set(vif.variable))
    df_drop_vif = pd.DataFrame([i.replace("_woe", "") for i in drop_col_vif],columns = ['drop_col'])
    df_drop_vif['DropReason'] = 'vif>%s'%vif_limit
    df_drop_vif['Count'] = len(drop_col_vif)

    return vif,train_woe_copy,df_drop_vif

def pvalue_params_select(df,pvalue_limit = 0.05,flag = "flagy"):
    """Backward-eliminate variables by GLM p-value, then enforce positive coefficients.

    Phase 1 repeatedly refits a binomial GLM and removes the variable with
    the largest p-value while it exceeds ``pvalue_limit``.  Phase 2 then
    removes, one per refit, any variable whose coefficient is negative.

    Returns
    -------
    (df1, df_drop_pvalue_params)
        df1 : copy of ``df`` restricted to the surviving columns.
        df_drop_pvalue_params : DataFrame logging the dropped columns
            ('_woe' suffix stripped, reason 'lr').
    """
    df1 = df.copy()

    def _fit(frame):
        # binomial GLM of the label on all remaining variables + intercept
        return sm.GLM(frame[flag], sm.add_constant(frame.drop(columns=[flag])),
                      family=sm.families.Binomial()).fit()

    # phase 1: drop the worst p-value until every one is within the limit
    while 1:
        pvalue = _fit(df1).pvalues
        pvalue.drop(index=['const'], inplace=True)
        pvalue.sort_values(inplace=True, ascending=False)
        if pvalue.iloc[0] > pvalue_limit:
            # [:-4] strips the '_woe' suffix for display only
            print('drop '+ pvalue.index[0][:-4] + ' P-value: ' + str(pvalue.iloc[0]))
            del df1[pvalue.index[0]]
        else:
            break

    # phase 2: drop the most negative coefficient until all are >= 0
    while 1:
        params = _fit(df1).params
        params.drop(index=['const'], inplace=True)
        params.sort_values(inplace=True, ascending=True)
        if params.iloc[0] < 0:
            print('drop ' + params.index[0][:-4] + ' Params: ' + str(params.iloc[0]))
            del df1[params.index[0]]
        else:
            break

    dropped = list(set(df.drop(columns=[flag]).columns) - set(df1.drop(columns=[flag]).columns))
    df_drop_pvalue_params = pd.DataFrame([i.replace("_woe", "") for i in dropped], columns=['drop_col'])
    df_drop_pvalue_params['DropReason'] = 'lr'
    df_drop_pvalue_params['Count'] = len(dropped)

    return df1,df_drop_pvalue_params

def myWOEbin(
    Data,
    Y,
    max_leaf_num=6, # maximum number of bins per variable
    min_woe_box_percent=0.05,# minimum leaf sample share (relative to the full row count)
    min_woe_box_num_min=100,# absolute minimum leaf size; both leaf constraints must hold
    special_values=None,
    method = 'tree',):
    """Compute candidate split points for every column of ``Data``.

    Non-null, non-special values of each column are binned either with a
    leaf-limited decision tree fitted against ``Y`` (method='tree') or with
    equal-frequency percentiles (method='freq').

    Parameters
    ----------
    Data : pd.DataFrame of numeric feature columns.
    Y : pd.Series of binary labels aligned with Data's rows.
    special_values : list-like or None, values excluded from the binning.
    method : 'tree' or 'freq'.  NOTE(review): any other value leaves
        ``split_num`` undefined and raises NameError further down.

    Returns
    -------
    pd.Series indexed by Data's columns; each element is the sorted list of
    interior split points for that column.
    """
    try:
        Data = Data.reset_index(drop=1)
        Y = Y.reset_index(drop=1)
    except Exception as e:
        # inputs are expected to be DataFrame/Series; warn and carry on
        print("输入数据非数据框格式，请调整为数据框！")

    # each leaf must hold at least min_woe_box_percent of the rows and
    # never fewer than min_woe_box_num_min samples
    min_woe_box_num = int(Data.shape[0] * min_woe_box_percent)
    if min_woe_box_num < min_woe_box_num_min:
        min_woe_box_num = min_woe_box_num_min

    var_num = len(Data.columns)

    # var_splitpoint stores the split points of every variable
    var_splitpoint = list(np.zeros([var_num, 1]))
    if special_values:
        special_values = list(special_values)
    else:
        special_values = []
    for i in range(0, var_num):
        # only non-null, non-special values take part in the optimal binning
        temp_var = Data.iloc[:, i]
        NonNan_position = np.where(
            ~(temp_var.isna()) & ~(temp_var.isin(special_values))
        )[0]
        max_leaf = max_leaf_num
        # tree method: fit a shallow decision tree, then read its thresholds
        if method == 'tree':
            groupdt = tree.DecisionTreeClassifier(
                criterion="entropy",
                min_samples_leaf=min_woe_box_num,
                max_leaf_nodes=max_leaf,
            )

            groupdt.fit(
                np.array(Data.iloc[NonNan_position, i]).reshape(-1, 1),
                Y.iloc[NonNan_position],
            )
            dot_data = tree.export_graphviz(
                groupdt,
                out_file=None,
            )
            # thresholds are scraped from the graphviz dump ("<= x\nentropy")
            pattern = re.compile("<= (.*?)\\\\nentropy", re.S)
            split_num = re.findall(pattern, dot_data)
        elif method == 'freq':
            # equal-frequency split points; drop the 0th percentile
            split_num = np.percentile(Data.iloc[NonNan_position, i], np.linspace(0,100, max_leaf, endpoint=False), 
                                     interpolation='linear')
            split_num = split_num[1:]

        splitpoint = [float(j) for j in split_num]
        final_splitpoint = sorted(list(set(splitpoint)))
        try:
            # drop the first split if it does not actually separate anything
            if Data.iloc[NonNan_position, i].min() >= final_splitpoint[0]:
                final_splitpoint = final_splitpoint[1:]
        except:
            pass
        var_splitpoint[i] = final_splitpoint

    var_splitpoint = pd.Series(var_splitpoint, index=Data.columns)
    return var_splitpoint

def single_woe_cal(data, y, SingleBreak, special_values=None):
    """Build the WOE binning table for a single variable.

    Parameters
    ----------
    data : pd.DataFrame with exactly two columns -- the variable and the label.
    y : str, column name of the label.
    SingleBreak : list of interior split points; -inf/+inf are appended here.
    special_values : scalar, list or None; these values (plus NaN) are
        binned individually instead of falling into the numeric cut.

    Returns
    -------
    dict {variable_name: DataFrame}, one row per bin with columns
    ['variable', 'bin', 'count', 'count_distr', 'good', 'bad', 'badprob',
     'woe', 'bin_iv', 'total_iv', 'breaks'].
    """
    print("data变量: {}".format(data.columns), data.shape)
    SingleBreak = sorted(list(set([-np.inf] + SingleBreak + [np.inf])))
    if y not in data.columns:
        raise Exception("label not found in data:", y)
    v = [i for i in data.columns.to_list() if i != y]
    if len(v) != 1:
        raise Exception("Input array must contain only one explanatory variable")
    # overall totals used for the distribution ratios below
    total_num = data.shape[0]
    total_bad = data[y].sum()
    total_good = total_num - total_bad
    # split rows: special values / NaN are binned apart from the cut
    if not isinstance(special_values, list):
        special_values = [special_values]
    data1 = data[data[v[0]].isin(list(special_values) + [np.nan, None])]
    data = data[~data[v[0]].isin(list(special_values) + [np.nan, None])]

    # left-closed interval cut of the regular values
    tempdata = data.assign(bin=pd.cut(data[v[0]], SingleBreak, right=False))

    temptable = (
        tempdata.groupby("bin")
        .agg(["count", np.mean, np.sum])[y]
        .rename(columns={"count": "count", "mean": "badprob", "sum": "bad"})
    )

    # complete the bin description
    temptable["variable"] = v[0]
    temptable["count_distr"] = temptable["count"] / total_num
    temptable["good"] = temptable["count"] - temptable["bad"]
    temptable["breaks"] = SingleBreak[1:]
    temptable["bad_pct"] = temptable["bad"] / total_bad
    temptable["good_pct"] = (temptable["count"] - temptable["bad"]) / total_good
    temptable["woe"] = np.log(temptable["bad_pct"] / temptable["good_pct"])
    # cap +/-inf WOE (bins containing only goods or only bads)
    temptable['woe'] = temptable['woe'].replace(np.inf, 9999)
    temptable['woe'] = temptable['woe'].replace(-np.inf, -9999)
    temptable["bin_iv"] = (temptable["bad_pct"] - temptable["good_pct"]) * temptable[
        "woe"
    ]
    # the same statistics for the special-value / NaN rows (one bin per value)
    data1 = data1.fillna("nan")
    data1 = data1.rename(columns={v[0]: "bin"})
    temptable1 = (
        data1.groupby("bin")
        .agg(["count", np.mean, np.sum])[y]
        .rename(columns={"count": "count", "mean": "badprob", "sum": "bad"})
    )
    temptable1["variable"] = v[0]
    temptable1["count_distr"] = temptable1["count"] / total_num
    temptable1["good"] = temptable1["count"] - temptable1["bad"]
    temptable1["breaks"] = temptable1.index
    temptable1["bad_pct"] = temptable1["bad"] / total_bad
    temptable1["good_pct"] = (temptable1["count"] - temptable1["bad"]) / total_good
    temptable1["woe"] = np.log(temptable1["bad_pct"] / temptable1["good_pct"])
    temptable1['woe'] = temptable1['woe'].replace(np.inf, 9999)
    temptable1['woe'] = temptable1['woe'].replace(-np.inf, -9999)
    temptable1["bin_iv"] = (
        temptable1["bad_pct"] - temptable1["good_pct"]
    ) * temptable1["woe"]

    # special bins first, then interval bins; total_iv sums every bin's iv
    single_bin = pd.concat([temptable1, temptable], axis=0, sort=False)
    single_bin["total_iv"] = single_bin["bin_iv"].sum()
    single_bin.reset_index(inplace=True)
    single_bin = {
        v[0]: single_bin[
            [
                "variable",
                "bin",
                "count",
                "count_distr",
                "good",
                "bad",
                "badprob",
                "woe",
                "bin_iv",
                "total_iv",
                "breaks",
            ]
        ]
    }
    return single_bin

def sdf_woebin(
    data,
    y,
    breaks_list=None,
    max_leaf_num=6,
    min_woe_box_percent=0.01,
    min_woe_box_num_min=100,
    special_values=None,
    method = 'tree'):
    # Pandas Version
    """Auto-bin every variable of ``data`` and compute its WOE table.

    Parameters
    ----------
    data : pd.DataFrame, original dataset including the label column.
    y : str, name of the label column.
    breaks_list : dict or None, user-supplied break points per variable;
        they override the automatically found ones (special values and
        'nan' entries are stripped first).
    max_leaf_num, min_woe_box_percent, min_woe_box_num_min,
    special_values, method : forwarded to myWOEbin.

    Returns
    -------
    dict {variable: WOE bin DataFrame} as produced by single_woe_cal.
    """
    # label check
    if y not in data.columns:
        raise Exception("label not found in data:", y)
    # NOTE(review): these three totals are recomputed in single_woe_cal
    # and unused here
    total_num = data.shape[0]
    total_bad = data[y].sum()
    total_good = total_num - total_bad
    # split-point search (decision tree or equal frequency)
    breaks = myWOEbin(
        data.drop(columns=y),
        data[y],
        max_leaf_num=max_leaf_num,
        min_woe_box_percent=min_woe_box_percent,
        min_woe_box_num_min=min_woe_box_num_min,
        special_values=special_values,
        method = method
        
    )
    breaks = breaks.to_dict()
    # user-supplied breaks must not contain special values / 'nan'
    if breaks_list:
        if isinstance(special_values, list):
            breaks_list = dict(
                [
                    (k, list(set(v) - set(special_values) - set(["nan"])))
                    for k, v in breaks_list.items()
                ]
            )
        else:
            breaks_list = dict(
                [
                    (k, list(set(v) - set([special_values]) - set(["nan"])))
                    for k, v in breaks_list.items()
                ]
            )
        breaks.update(breaks_list)
        # parallel woe/iv computation, one job per variable
    parallel_res = Parallel(n_jobs=-1)(
        delayed(single_woe_cal)(
            data[[y, v]], y, breaks[v], special_values=special_values
        )
        for v in list(breaks)
    )
    # merge the per-variable dicts into a single result dict
    bins_dict = {}
    for b in parallel_res:
        bins_dict.update(b)
    return bins_dict

def _single_transform(series, trans_dict):
    series.rename(series.name + "_woe", inplace=True)
    normal_dict = {
        i: j for i, j in trans_dict.items() if type(i) == pd._libs.interval.Interval
    }
    special_dict = {
        i: j for i, j in trans_dict.items() if type(i) != pd._libs.interval.Interval
    }
    series1 = series[series.isin(list(special_dict) + [np.nan])]
    series2 = series[~series.isin(list(special_dict) + [np.nan])]
    #     try:
    #         special_dict.update(dict([(i, trans_dict[i]) for i in special_values]))
    #     except:
    #         print("Warning: No special_values get")
    series1 = series1.fillna("nan")
    result = pd.concat([series1.map(special_dict), series2.map(normal_dict)], axis=0)
    return result

def sdf_woebin_ply(data, bins, n_jobs=-1):
    """Transform raw data into WOE values using previously computed bins.

    Parameters
    ----------
    data : pd.DataFrame, untransformed data.
    bins : dict {variable: WOE bin DataFrame} (output of sdf_woebin);
        each table's 'bin' -> 'woe' pairs drive the mapping.
    n_jobs : int, number of parallel workers (-1 = all cores).

    Returns
    -------
    pd.DataFrame whose mapped columns are renamed to ``<var>_woe``;
    columns of ``data`` without an entry in ``bins`` are passed through
    untransformed.
    """
    # per-variable mapping dict: bin -> woe
    Woe_dict = dict(
        [
            (i, dict(j[["bin", "woe"]].to_dict(orient="split")["data"]))
            for i, j in bins.items()
        ]
    )
    # NOTE(review): datacopy is never used below
    datacopy = data.copy()
    keys = list(Woe_dict)
    # report coverage between the bins and the data columns
    DiffElement = list(set(keys) - set(data.columns))
    Untransformed = list(set(data.columns) - set(keys))
    try:
        data_untrans = data[Untransformed]
    except:
        data_untrans = pd.DataFrame()
    if len(DiffElement) != 0:
        raise Exception("These variables aren't included in data:", DiffElement)
    if len(Untransformed) != 0:
        if len(Untransformed) >= 5:
            print(
                "These variables will not be transformed:",
                Untransformed[:5],
                "list goes on",
            )
        else:
            # NOTE(review): the message also says "list goes on" although
            # the full list is printed in this branch
            print(
                "These variables will not be transformed:",
                Untransformed,
                "list goes on",
            )
    parallel_res = Parallel(n_jobs=n_jobs)(
        delayed(_single_transform)(data[v], Woe_dict[v]) for v in keys
    )
    data_transformed = pd.concat(parallel_res + [data_untrans], axis=1)
    return data_transformed

def sdf_woebin_plot(bins):
    """Plot each variable's bins: stacked good/bad count bars plus a bad-rate line.

    Parameters
    ----------
    bins : dict {variable: WOE bin DataFrame} (output of sdf_woebin).

    Returns
    -------
    dict {variable: matplotlib Axes} of the produced charts.
    """
    plt.rcParams['font.sans-serif']=['SimHei'] # render CJK labels correctly
    plt.rcParams['axes.unicode_minus']=False # render the minus sign correctly
    figure = {}
    for col in bins:
        f, ax1 = plt.subplots(figsize=(8, 6))
        ax1.set_ylabel("占比")
        ax1.set_title(col + ", iv={}".format(round(bins[col].iloc[0]["total_iv"], 6)))
        plt.xticks(list(range(len(bins[col]))), bins[col]["bin"], rotation=45, fontsize=8)        
        # stacked bars: good share at the bottom, bad share on top
        ax1.bar(
            list(range(len(bins[col]))),
            bins[col]["count_distr"] * bins[col]["good"] / bins[col]["count"],
            label="占比",
            color=(24/254,192/254,196/254),)
        ax1.bar(
            list(range(len(bins[col]))),
            bins[col]["count_distr"] * bins[col]["bad"] / bins[col]["count"],
            bottom=bins[col]["count_distr"] * bins[col]["good"] / bins[col]["count"],
            color=(246/254,115/254,109/254),)
        # annotate each bar with its population share and raw count
        for a, b in zip(range(len(bins[col])), bins[col]["count_distr"]):
            c = str(round(b * 100, 2)) + "%"
            plt.text(a, b * 1.02, c + ' '+ str(bins[col].loc[a, 'count']), ha="center", va="baseline", fontsize=10)
        plt.rcParams["axes.grid"] = False
        
        # secondary axis: bad rate per bin
        ax2 = ax1.twinx()
        ax2.set_ylabel("坏客率")
        ax2.plot(
            range(len(bins[col])),
            bins[col]["badprob"],
            label="坏客率",
            marker="o",
            markerfacecolor="white",
            markersize=8,
            color="blue",)  
        for a, b in zip(range(len(bins[col])), bins[col]["badprob"]):
            c = str(round(b * 100, 2)) + "%"
            ax2.text(a, b, c, ha="right", va="top", fontsize=10, color="blue")
        plt.xticks(
            list(range(len(bins[col]))), bins[col]["bin"], rotation=45, fontsize=8)
        ax1.legend(loc="upper right", bbox_to_anchor=(1, 0.9))
        ax2.legend(loc=1)
        figure.update({col: ax1})
    return figure

def mannual_breaks(pr_bins, data, ylabel="flagy", special_values=None):
    """Interactively review and adjust bin break points, variable by variable.

    INPUT:
        pr_bins: bins generated through auto-binning process
        data: data in original form
        ylabel: name of ylabel, in str form
        special_values: values binned separately (forwarded to sdf_woebin)
    OUTPUT:
        all_breaklist: dict variable -> confirmed break list
        droplist: variables that cannot be binned properly

    Console-driven loop: for each variable the current binning is plotted
    and the user chooses keep (1), adjust (2), back (3), drop (4) or
    quit (5).
    """

    import matplotlib.pyplot as plt

    xname = list(pr_bins)
    i = 0
    all_breaklist = {}
    droplist = []
    while i < (len(pr_bins)):
        b = xname[i]
        p1 = sdf_woebin_plot({b: pr_bins[b]})
        # NOTE(review): plt.show(p1) passes the dict as a positional arg;
        # modern matplotlib expects plt.show() with no positional argument
        plt.show(p1)
        print(
            i,
            "/",
            len(pr_bins),
            "current splitting points:",
            list(pr_bins[b]["breaks"]),
        )
        print(("Adjust breaks for (%s)?\n 1.next\n 2.yes\n 3.back" % b))
        if_adj = input("1:next    2. yes    3.back  4.drop  5.quit\n")
        new_breaks = {}
        while if_adj == "2":
            print("pls enter the new breaks:")
            try:
                new_breaks_points = input().split(",")
                new_breaks = {b: sorted([float(x) for x in new_breaks_points])}
                # re-bin this variable with the user-supplied breaks
                bins_adj_temp = sdf_woebin(
                    data.loc[:, [b, ylabel]],
                    y=ylabel,
                    breaks_list=new_breaks,
                    special_values=special_values,
                )
                p2 = sdf_woebin_plot(bins_adj_temp)
                plt.show(p2)
                # sort breaks; string entries ('nan', specials) go first
                breaks_output = sorted(
                    list(bins_adj_temp[b]["breaks"]),
                    key=lambda x: -np.inf if isinstance(x, str) else x,
                )
                print("current splitting points:", breaks_output)
                print(("Adjust breaks for (%s)?\n 1.next\n 2.yes\n 3.back" % b))
                if_adj = input("1:save and next    2. yes    3.back  4.drop")
            except Exception as e:
                print(e)
                print("Error while adjusting", b)
                if_continue = input("to continue? \n 1.yes 2.next 3.quit")
                if if_continue == "1":
                    continue
                elif if_continue == "2":
                    # BUG FIX: this was `if_adj == "1"` -- a no-op
                    # comparison instead of an assignment, so choosing
                    # "2.next" after an error looped forever instead of
                    # saving and moving on.
                    if_adj = "1"
                elif if_continue == "3":
                    break
        if if_adj == "1":
            if b not in new_breaks.keys():
                # user kept the automatic breaks
                all_breaklist[b] = list(pr_bins[b]["breaks"])
            else:
                # user adjusted: store the confirmed breaks
                all_breaklist[b] = breaks_output
            i += 1
            continue

        if if_adj == "3":
            if i == 0:
                print('This is the first plot, "back" option forbidden')
                break
            else:
                i -= 1
        if if_adj == "4":
            droplist.append(b)
            i += 1
        if if_adj == "5":
            break
    print("Mannual adjustment completed")
    return all_breaklist, droplist

def sdf_single_scorecard_ply(dx, cardx, x):
    """Score a single variable column against its scorecard slice.

    Parameters
    ----------
    dx : pd.Series of values for one variable (NaN already filled with
        "nan" upstream).
    cardx : pd.DataFrame slice of the scorecard holding the 'bin' and
        'points' columns for this variable.
    x : str, variable name (kept for interface compatibility; unused).

    Returns
    -------
    pd.Series of points, special-value rows first, then interval rows.
    """
    pairs = cardx[["bin", "points"]].to_dict("split")["data"]
    interval_points = {}
    special_points = {}
    for bin_key, pts in pairs:
        if type(bin_key) == pd._libs.interval.Interval:
            interval_points[bin_key] = pts
        else:
            special_points[bin_key] = pts
    mask = dx.isin(list(special_points))
    scored_special = dx[mask].map(special_points)
    scored_interval = dx[~mask].map(interval_points)
    return pd.concat([scored_special, scored_interval], axis=0)

def sdf_scorecard_ply(data, card, only_total_score=True, var_kp=None):
    """Apply a scorecard to raw data and compute the total score per row.

    Parameters
    ----------
    data : pd.DataFrame of raw (untransformed) variables.
    card : dict/iterable of per-variable scorecard DataFrames, each with
        'variable', 'bin' and 'points' columns; one entry has
        variable == 'basepoints' holding the base score.
    only_total_score : bool, drop the per-variable point columns and keep
        only the 'score' column when True.
    var_kp : list or None, extra columns to carry through to the output.

    Returns
    -------
    pd.DataFrame with the per-variable points (unless only_total_score),
    the var_kp columns (if given) and a 'score' total column.
    """
    cardx = pd.concat(card, ignore_index=True)
    dt = data.copy(deep=True)
    cols = list(cardx.variable.unique())
    cols.remove("basepoints")
    # NaNs must hit the 'nan' special bin of each variable's card
    dt[cols] = dt[cols].fillna("nan")
    parallel_score = Parallel(n_jobs=-1)(
        delayed(sdf_single_scorecard_ply)(dt[v], cardx[cardx.variable == v], v)
        for v in cols
    )
    try:
        # keep the var_kp columns next to the per-variable scores
        data_score = pd.concat(parallel_score + [data[var_kp]], axis=1)
        data_score["score"] = (data_score.drop(columns = var_kp).sum(axis=1)+ cardx[cardx.variable == "basepoints"]["points"].values[0])
    except:
        # var_kp is None / not present -> total over all scored columns
        data_score = pd.concat(parallel_score, axis=1)
        data_score["score"] = (data_score.sum(axis=1)+ cardx[cardx.variable == "basepoints"]["points"].values[0])

    if only_total_score:
        data_score.drop(columns=cols, inplace=True)
    return data_score

def iv(dt, y, x = None, positive='bad|1', order=True):
    """Information value of each x column against the label ``y``.

    Parameters
    ----------
    dt : pd.DataFrame containing the label and the candidate columns.
    y : str or list, label column name.
    x : str, list or None; None means every non-label column of ``dt``.
    positive : kept for API compatibility (unused in this implementation).
    order : bool, sort the result by info_value descending when True.

    Returns
    -------
    pd.DataFrame with columns ['variable', 'info_value'].
    """
    dt = dt.copy(deep=True)
    y = [y] if isinstance(y, str) else y
    if x is not None and isinstance(x, str):
        x = [x]
    if x is not None:
        dt = dt[y + x]
    # candidate variable names = everything except the label
    xs = list(set(dt.columns) - set(y))
    ivlist = pd.DataFrame(
        {'variable': xs,
         'info_value': [iv_xy(dt[col], dt[y[0]]) for col in xs]},
        columns=['variable', 'info_value'])
    if order:
        ivlist = ivlist.sort_values(by='info_value', ascending=False)
    return ivlist

def iv_xy(x, y):
    """Information value of a single variable ``x`` against binary label ``y``.

    Values are compared as strings, NaNs become a 'missing' group, and
    zero good/bad counts are replaced by 0.9 to keep the logs finite.
    """
    def goodbad(df):
        # good = y==0, bad = y==1 within the group
        return pd.Series({'good': (df['y'] == 0).sum(),
                          'bad': (df['y'] == 1).sum()})

    frame = pd.DataFrame({'x': x.astype('str'), 'y': y}).fillna('missing')
    counts = frame.groupby('x').apply(goodbad).replace(0, 0.9)
    distr_bad = counts['bad'] / counts['bad'].sum()
    distr_good = counts['good'] / counts['good'].sum()
    return ((distr_bad - distr_good) * np.log(distr_bad / distr_good)).sum()