#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun  6 16:01:11 2019

@类别型变量的分箱
    统计值，并计算占比
@数值型变量的分箱
    三种分箱
    1.等频分箱
    2.卡方分箱
    3.最优分箱

2019-06-27 bug 修复：1.修复区间为左开右闭2.修复等频分箱代码
@author: xubing
"""


import pandas as pd
import numpy as np
import math
from tqdm import trange
from scipy.stats import chi2
from sklearn.tree import DecisionTreeClassifier
from EDA import dist_out_helper

def cal_WOE_IV(df, cats, target):
    '''
    Compute per-bin WOE and IV for a binned feature.

    df: DataFrame holding the binary (0/1) target column
    cats: binned feature values, index-aligned with df
    target: name of the target column

    Returns (woe_dict, iv_dict, iv_sum), the first two keyed by bin label.
    '''
    eps = 0.0001  # guard against division by zero / log(0)
    bin_class_counts = pd.crosstab(cats, df[target]) + eps
    class_totals = df[target].value_counts() + eps
    class_rates = bin_class_counts / class_totals  # each bin's share of each class
    class_rates['woe'] = np.log(class_rates[1] / class_rates[0])
    class_rates['iv'] = (class_rates[1] - class_rates[0]) * class_rates['woe']
    return class_rates['woe'].to_dict(), class_rates['iv'].to_dict(), class_rates['iv'].sum()


#def cal_nan_distri(df,col,target): 
#    '''
#    #把空值单独作为一个段,单独计算woe和iv加到非空计算的DataFrame里面
#    '''
#    nan_rec = {}
#    nan_df = df[target][df[col].isnull()==True]
#    if len(nan_df) > 1: #既有通过的，又有没通过的
#        eps = 0.0001
#        nan_val_cnt = nan_df.value_counts() + eps
#        tar_val_cnt = df[target].value_counts() + eps
#        
#        
#        nan_woe = np.log(
#                (nan_val_cnt[1]/nan_val_cnt[0])/
#                (tar_val_cnt[1]/tar_val_cnt[0])
#                )
#        nan_iv = (nan_val_cnt[1]/tar_val_cnt[1] - nan_val_cnt[0]/tar_val_cnt[0]) * nan_woe
#        nan_rec = {
#          'count':len(nan_df),
#          'all_pct':len(nan_df)/len(df),
#         'p_count':int(nan_val_cnt[1]),
#         'p_pct':nan_val_cnt[1]/len(nan_df),
#         'n_count':int(nan_val_cnt[0]),
#         'n_pct': nan_val_cnt[0]/len(nan_df),
#         'woe':nan_woe,
#         'iv':nan_iv}
#    else: #只有通过或只有没通过 woe会出现无穷值的情况
#       for key in ['count', 'all_pct', 'p_count','p_pct','n_count', 'n_pct', 'woe', 'iv', ]:
#           nan_rec[key]= 0
#    nan_DF = pd.DataFrame(nan_rec,index=['Nan'])
#    return nan_DF
def cal_nan_distri(df, col, target):
    '''
    Build a one-row DataFrame (index 'Nan') describing the missing-value
    bucket of *col*: counts, percentages, WOE and IV against *target*.

    df: input DataFrame
    col: feature column name
    target: binary (0/1) target column name

    If the missing rows contain only one target class (or there are no
    missing rows), WOE would be infinite, so every statistic is reported
    as 0 instead.
    '''
    missing_targets = df[target][df[col].isnull() == True]

    stat_keys = ['count', 'all_pct', 'p_count', 'p_pct', 'n_count', 'n_pct', 'woe', 'iv']
    if len(missing_targets.value_counts()) > 1:  # both classes present among missing rows
        eps = 0.0001
        miss_cnt = missing_targets.value_counts() + eps
        all_cnt = df[target].value_counts() + eps

        woe = np.log((miss_cnt[1] / miss_cnt[0]) / (all_cnt[1] / all_cnt[0]))
        iv = (miss_cnt[1] / all_cnt[1] - miss_cnt[0] / all_cnt[0]) * woe
        record = {
            'count': len(missing_targets),
            'all_pct': len(missing_targets) / len(df),
            'p_count': int(miss_cnt[1]),
            'p_pct': miss_cnt[1] / len(missing_targets),
            'n_count': int(miss_cnt[0]),
            'n_pct': miss_cnt[0] / len(missing_targets),
            'woe': woe,
            'iv': iv,
        }
    else:  # one class only: WOE undefined, report zeros
        record = dict.fromkeys(stat_keys, 0)
    return pd.DataFrame(record, index=['Nan'])

def cat_binning(df, col, target):
    '''
    Categorical "binning": each distinct value is its own bin.

    Returns the value counts of the five most frequent non-missing
    categories of *col* (inf values are treated as missing).
    '''
    subset = df[[col, target]]                 # keep the feature and the label only
    subset = subset.replace({math.inf: None})  # inf handled as missing
    subset = subset[subset[col].isnull() == False]
    return subset[col].value_counts().head()
    

'''
======================================
               等频分箱
======================================
'''

def equal_frequency_binning(df, col, target):
    '''
    Equal-frequency (quartile) binning of a numeric column with per-bin
    WOE/IV statistics.

    df: input DataFrame
    col: name of the numeric column to bin
    target: name of the binary (0/1) target column

    Returns (stats_df, iv_sum): stats_df has one row per bin plus a 'Nan'
    row (missing bucket) and a 'Total' row; iv_sum is the summed IV of the
    non-missing bins.

    Fix: the pct columns previously divided counts by sum(df2[col]) (the
    sum of the feature's *values*), which is not a percentage; they now
    divide by len(df), matching chi2_binning / free_binning.
    '''
    df2 = df[[col, target]]              # keep only the feature and the label
    df2 = df2.replace({math.inf: None})  # treat inf as missing
    nan_rec = cal_nan_distri(df2, col, target)  # stats for the missing bucket
    df2 = df2[df2[col].isnull() == False]

    # quartile bins; retbins=True so the class-wise cuts reuse the same edges
    cats, bins = pd.qcut(df2[col], q=4, retbins=True, precision=4, duplicates='drop')

    n_total = len(df)  # denominator for every pct column

    # overall distribution
    Ser1 = cats.value_counts().sort_index()
    Ser2 = Ser1 / n_total

    # WOE / IV per bin (dict order matches the sorted bin index)
    woe, iv, iv_sum = cal_WOE_IV(df2, cats, target)
    Ser_woe = pd.DataFrame(list(woe.values()), index=Ser1.index)
    Ser_iv = pd.DataFrame(list(iv.values()), index=Ser1.index)

    # positive-class distribution
    p_df = df2[df2[target] == 1]
    p_cats = pd.cut(p_df[col], bins, duplicates='drop', precision=4, include_lowest=True)
    Ser3 = p_cats.value_counts().sort_index()
    Ser4 = Ser3 / n_total

    # negative-class distribution
    n_df = df2[df2[target] == 0]
    n_cats = pd.cut(n_df[col], bins, duplicates='drop', precision=4, include_lowest=True)
    Ser5 = n_cats.value_counts().sort_index()
    Ser6 = Ser5 / n_total

    new_df = pd.concat([Ser1, Ser2, Ser3, Ser4, Ser5, Ser6, Ser_woe, Ser_iv], axis=1)
    new_df.columns = ['count', 'all_pct', 'p_count', 'p_pct', 'n_count', 'n_pct', 'woe', 'iv']
    new_df = new_df.sort_index()

    # capture interval labels before the index is discarded below
    # (replaces the old write/read round-trip through tmp_df.csv)
    bin_labels = [str(i) for i in new_df.index]

    # append the missing-value row (pd.concat: DataFrame.append was removed in pandas 2.0)
    add_nan_df = pd.concat([new_df, nan_rec], ignore_index=True)

    # column-wise totals
    total = {c: add_nan_df[c].sum() for c in add_nan_df.columns}
    add_total_df = pd.concat([add_nan_df, pd.DataFrame([total])], ignore_index=True)

    add_total_df.index = bin_labels + ['Nan', 'Total']
    return add_total_df, iv_sum

'''
======================================
               卡方分箱
======================================
'''

def calc_chiSquare(df, feature, target):
    '''
    Compute the chi-square statistic for every distinct value of *feature*.

    params:
        df: sample DataFrame
        feature: feature column name
        target: binary (0/1) target column name
    return:
        DataFrame with one row per feature value, sorted ascending by value:
        feature: the feature value
        act_target_cnt: actual positive count
        expected_target_cnt: expected positive count
        chi_square: chi-square statistic
    '''
    # overall expected positive rate
    target_cnt = df[target].sum()
    sample_cnt = len(df[target])
    expected_ratio = target_cnt * 1.0 / sample_cnt
    df = df[[feature, target]]
    # Fix: sort values ascending. The original used list(set(...)), whose
    # order is arbitrary, while the neighbour-merging in chiMerge_* assumes
    # rows are ordered intervals.
    col_value = sorted(set(df[feature]))
    # chi-square, actual and expected positive counts per value
    chi_list = []; target_list = []; expected_target_list = []
    for value in col_value:
        df_target_cnt = df.loc[df[feature] == value, target].sum()
        df_cnt = len(df.loc[df[feature] == value, target])
        expected_target_cnt = df_cnt * expected_ratio
        chi_square = (df_target_cnt - expected_target_cnt)**2 / expected_target_cnt
        chi_list.append(chi_square)
        target_list.append(df_target_cnt)
        expected_target_list.append(expected_target_cnt)
    chi_stats = pd.DataFrame({feature: col_value, 'chi_square': chi_list,
                              'act_target_cnt': target_list, 'expected_target_cnt': expected_target_list})
    return chi_stats[[feature, 'act_target_cnt', 'expected_target_cnt', 'chi_square']]

def merge_chiSquare(chi_result, index, mergeIndex, a = 'expected_target_cnt',
                    b = 'act_target_cnt', c = 'chi_square'):
    '''
    Merge interval row *index* into interval row *mergeIndex*.

    The expected (*a*) and actual (*b*) target counts of row *index* are
    added into row *mergeIndex*, the chi-square (*c*) of the merged row is
    recomputed, row *index* is dropped and the index is renumbered.

    params:
        chi_result: chi-square DataFrame to merge within (mutated in place
                    before the dropped copy is returned)
        index: row to be absorbed (and removed)
        mergeIndex: row that receives the merge
        a, b, c: column names for expected count, actual count, chi-square
    return:
        merged DataFrame with a reset integer index
    '''
    chi_result.loc[mergeIndex, a] += chi_result.loc[index, a]
    chi_result.loc[mergeIndex, b] += chi_result.loc[index, b]
    merged_exp = chi_result.loc[mergeIndex, a]
    merged_act = chi_result.loc[mergeIndex, b]
    chi_result.loc[mergeIndex, c] = (merged_act - merged_exp) ** 2 / merged_exp
    return chi_result.drop([index]).reset_index(drop=True)

##=======================================================
def get_chiSquare_distuibution(dfree=4, cf=0.1):
    '''
    Return the chi-square threshold for the given degrees of freedom and
    significance level.

    params:
        dfree: degrees of freedom (max bins - 1), default 4
        cf: significance level, default 10%
    return:
        chi-square threshold (float)

    The original built a 29x8 lookup table of chi2.isf values and also
    mutated the global pandas display precision as a side effect; the same
    value is obtained directly (and for any dfree/cf, not just the tabled
    ones) from the inverse survival function.
    '''
    return chi2.isf(cf, df=dfree)

def chiMerge_minChiSquare(chi_stats, feature, dfree=4, cf=0.1, maxInterval=5):
    '''
    Chi-square bin merging -- chi-square threshold method.

    Repeatedly merges the interval with the smallest chi-square statistic
    into a neighbour, until the minimum chi-square reaches the threshold
    derived from (dfree, cf) or the group count drops to maxInterval.

    params:
        chi_stats: chi-square statistics DataFrame (from calc_chiSquare)
        feature: target feature name
        maxInterval: maximum number of bins, default 5
        dfree: degrees of freedom (max bins - 1), default 4
        cf: significance level, default 10%
    return:
        merged chi-square DataFrame, and the list of split values split_list
        (NOTE(review): the feature minimum appears twice in split_list --
        once seeded below and once from the surviving first row; callers use
        pd.cut(..., duplicates='drop'), which tolerates this)
    '''
    threshold = get_chiSquare_distuibution(dfree, cf)
    min_chiSquare = chi_stats['chi_square'].min()
    group_cnt = len(chi_stats)
    split_list = [chi_stats[feature].min()]
    # keep merging while the smallest chi-square is below the threshold
    # and there are still more groups than maxInterval
    while(min_chiSquare < threshold and group_cnt > maxInterval):
        # position of the first row holding the minimum chi-square
        min_index = chi_stats[chi_stats['chi_square']==chi_stats['chi_square'].min()].index.tolist()[0]
        # first interval: merge the next row down into it
        if min_index == 0:
            chi_stats = merge_chiSquare(chi_stats, min_index+1, min_index)
        # last interval: merge it up into the previous row
        elif min_index == group_cnt-1:
            chi_stats = merge_chiSquare(chi_stats, min_index-1, min_index)
        # middle interval: merge towards the neighbour with the smaller chi-square
        else:
            if chi_stats.loc[min_index-1, 'chi_square'] > chi_stats.loc[min_index+1, 'chi_square']:
                chi_stats = merge_chiSquare(chi_stats, min_index, min_index+1)
            else:
                chi_stats = merge_chiSquare(chi_stats, min_index-1, min_index)
        min_chiSquare = chi_stats['chi_square'].min()
        group_cnt = len(chi_stats)
    chiMerge_result = chi_stats
    split_list.extend(chiMerge_result[feature].tolist())
    return chiMerge_result, split_list
##=======================================================

def chiMerge_maxInterval(chi_stats, feature, maxInterval=5):
    '''
    Chi-square bin merging -- maximum interval count method.

    Repeatedly merges the interval with the smallest chi-square statistic
    into a neighbour until at most maxInterval groups remain.

    params:
        chi_stats: chi-square statistics DataFrame (from calc_chiSquare)
        feature: target feature name
        maxInterval: maximum number of bins, default 5
    return:
        merged chi-square DataFrame, and the list of split values split_list
        (NOTE(review): the feature minimum appears twice in split_list --
        once seeded below and once from the surviving first row; callers use
        pd.cut(..., duplicates='drop'), which tolerates this)
    '''
    group_cnt = len(chi_stats)
    split_list = [chi_stats[feature].min()]
    # merge until the number of groups is within the limit
    while(group_cnt > maxInterval):
        # position of the first row holding the minimum chi-square
        min_index = chi_stats[chi_stats['chi_square']==chi_stats['chi_square'].min()].index.tolist()[0]
        # first interval: merge the next row down into it
        if min_index == 0:
            chi_stats = merge_chiSquare(chi_stats, min_index+1, min_index)
        # last interval: merge it up into the previous row
        elif min_index == group_cnt-1:
            chi_stats = merge_chiSquare(chi_stats, min_index-1, min_index)
        # middle interval: merge towards the neighbour with the smaller chi-square
        else:
            if chi_stats.loc[min_index-1, 'chi_square'] > chi_stats.loc[min_index+1, 'chi_square']:
                chi_stats = merge_chiSquare(chi_stats, min_index, min_index+1)
            else:
                chi_stats = merge_chiSquare(chi_stats, min_index-1, min_index)
        group_cnt = len(chi_stats)
    chiMerge_result = chi_stats
    split_list.extend(chiMerge_result[feature].tolist())
    return chiMerge_result, split_list


def chi2_binning(df,col,target):
    '''
    Chi-square binning of a numeric column with per-bin WOE/IV statistics.

    df: input DataFrame
    col: name of the column to bin
    target: binary (0/1) target column name

    Returns (stats_df, iv_sum): stats_df has one row per bin plus a 'Nan'
    row (missing bucket) and a 'Total' row; iv_sum is the summed IV of the
    non-missing bins.

    NOTE(review): writes a scratch file tmp_df.csv to the working directory
    (used only to recover string bin labels for the final index), and uses
    DataFrame.append, which was removed in pandas 2.0.
    '''
    df2 = df[[col,target]] # keep only this column and the label column
    df2 = df2.replace({math.inf:None})  # treat inf as missing
    nan_rec = cal_nan_distri(df2,col,target)  # stats for the missing bucket
    
    df2 = df2[df2[col].isnull()==False]
    
    chi_stats = calc_chiSquare(df2, col, target)
#    chiMerge_result, split_list = chiMerge_maxInterval(chi_stats, col, maxInterval=5) # alternative: max-interval merging
    chiMerge_result, split_list = chiMerge_minChiSquare(chi_stats, col, dfree=4, cf=0.5, maxInterval=5) # chi-square threshold merging

    
    
    bins = sorted(split_list)
#    bins.remove(bins[-1])
#    bins.append(math.inf)
#    print(bins)
    
    cats = pd.cut(df2[col],bins=bins,duplicates='drop',include_lowest=True,)
    
    # overall distribution (pct denominators use len(df), i.e. all rows incl. missing)
    Ser1 = (pd.value_counts(cats)).sort_index()
    #Ser2 = (Ser1 / sum(Ser1)).apply(lambda x: '%.2f%%' % (x * 100))
    Ser2 = Ser1 / len(df)
    
    # WOE and IV per bin (dict order matches the sorted bin index)
    woe, iv, iv_sum = cal_WOE_IV(df2, cats, target)
    Ser_woe = pd.DataFrame(list(woe.values()), index=Ser1.index)
    Ser_iv = pd.DataFrame(list(iv.values()), index=Ser1.index)

    # positive-class distribution, cut with the same bin edges
    p_df = df2[df2[target] == 1]
    p_cats = pd.cut(p_df[col], bins, duplicates='drop',include_lowest=True,)
    Ser3 = (pd.value_counts(p_cats)).sort_index()
    #Ser4 = (Ser3 / sum(Ser1)).apply(lambda x: '%.2f%%' % (x * 100))
    Ser4 = Ser3 / len(df)

    # negative-class distribution
    n_df = df2[df2[target] == 0]
    n_cats = pd.cut(n_df[col], bins, duplicates='drop', include_lowest=True)
    Ser5 = (pd.value_counts(n_cats)).sort_index()
    #Ser6 = (Ser5 / sum(Ser1)).apply(lambda x: '%.2f%%' % (x * 100))
    Ser6 = Ser5 / len(df)


    new_df = pd.concat([Ser1, Ser2, Ser3, Ser4, Ser5,Ser6, Ser_woe, Ser_iv], axis=1)
    new_df.columns = ['count', 'all_pct', 'p_count','p_pct','n_count', 'n_pct', 'woe', 'iv', ]
    new_df = new_df.sort_index()
    new_df.to_csv('tmp_df.csv',index=True)  # scratch file: only read back for its index labels

    # append the missing-value row
    add_nan_df = new_df.append(nan_rec, ignore_index=True)
    
    # column-wise totals across bins + missing row
    total = {
        'count':add_nan_df['count'].sum(),
        'all_pct':add_nan_df['all_pct'].sum(),
        
        'p_count':add_nan_df['p_count'].sum(),
        'p_pct':add_nan_df['p_pct'].sum(),
        
        'n_count':add_nan_df['n_count'].sum(),
        'n_pct':add_nan_df['n_pct'].sum(), 
        
        'woe':add_nan_df['woe'].sum(), 
        'iv':add_nan_df['iv'].sum(),}
    # append the totals row
    add_total_df = add_nan_df.append(total,ignore_index=True)
    
    temp_df = pd.read_csv('tmp_df.csv')

    # recover the interval labels as strings and re-label the final index
    temp_df_index = temp_df['Unnamed: 0'].values.tolist()
    temp_df_index.append('Nan')
    temp_df_index.append('Total')
    add_total_df.index = temp_df_index
#    print(add_total_df)
    
    return add_total_df,iv_sum

'''
==========================================
            基于CART的最优分箱
==========================================
'''

def optimal_binning_boundary(x: pd.Series, y: pd.Series, nan: float = -999.) -> list:
    '''
    Derive optimal bin boundaries for feature *x* against binary target *y*
    by fitting a shallow CART decision tree and collecting its split points.
    Missing and inf values of x are replaced by *nan* before fitting.
    '''
    feature = x.replace({math.inf: nan}).fillna(nan).values
    labels = y.values

    tree = DecisionTreeClassifier(criterion='gini',    # minimize Gini impurity at each split
                                  max_leaf_nodes=6,       # at most 6 leaves
                                  min_samples_leaf=0.05)  # each leaf holds >= 5% of samples

    tree.fit(feature.reshape(-1, 1), labels)

    # internal nodes (left child != right child) carry the split thresholds
    left = tree.tree_.children_left
    right = tree.tree_.children_right
    thresholds = tree.tree_.threshold
    boundary = sorted(thresholds[i]
                      for i in range(tree.tree_.node_count)
                      if left[i] != right[i])

    # pad with the data range; +0.1 so the max sample falls inside the last bin
    return [feature.min()] + boundary + [feature.max() + 0.1]



'''
=====================================
free（自由）分箱 : 自由选择分箱方式
======================================
'''


def free_binning(df,col,target,bins=None):
    '''
    Bin a column with a caller-selected strategy and compute the full
    per-bin statistics table (counts, pct, WOE, IV).

    df: input DataFrame
    col: column to bin
    target: binary (0/1) target column name
    bins: strategy selector -- one of the strings
          'equal_frequence_binning', 'chi2_max_interval',
          'chi2_min_threshold', 'optimal_binning',
          or an explicit list of bin edges.
          NOTE(review): the default bins=None falls through to sorted(bins)
          below and raises TypeError -- a strategy or edge list is required.

    Returns (stats_df, iv_sum) as in equal_frequency_binning/chi2_binning.
    '''

    if bins == 'equal_frequence_binning':
        return equal_frequency_binning(df,col,target)
        
    else:
        df2 = df[[col,target]] # keep only this column and the label column
        df2 = df2.replace({math.inf:None})  # treat inf as missing
        nan_rec = cal_nan_distri(df2,col,target)  # stats for the missing bucket
        df2 = df2[df2[col].isnull()==False]
        
        if bins == 'chi2_max_interval':
            chi_stats = calc_chiSquare(df2, col, target)
            chiMerge_result, split_list = chiMerge_maxInterval(chi_stats, col, maxInterval=5) # chi-square merging, max-interval method
            bins = sorted(split_list)
        elif bins == 'chi2_min_threshold':
            chi_stats = calc_chiSquare(df2, col, target)
            chiMerge_result, split_list = chiMerge_minChiSquare(chi_stats, col, dfree=4, cf=0.1, maxInterval=5) # chi-square merging, threshold method
            bins = sorted(split_list)
        elif bins == 'optimal_binning':
            split_list = optimal_binning_boundary(df2[col], df2[target], nan=-999)
            bins = sorted(split_list)
        else:
            bins = sorted(bins)  # explicit bin edges supplied by the caller
            
        # replace the top edge with +inf so large values are always captured,
        # and shift the bottom edge down; with include_lowest=False this
        # yields left-open, right-closed intervals (the 2019-06-27 fix)
        bins.remove(bins[-1])
        bins.append(math.inf)
        bins[0] = bins[0]-0.1
        
        cats = pd.cut(df2[col],bins=bins,duplicates='drop',precision=4,include_lowest=False,)
       
        # overall distribution (pct denominators use len(df), all rows incl. missing)
        Ser1 = (pd.value_counts(cats)).sort_index()
        #Ser2 = (Ser1 / sum(Ser1)).apply(lambda x: '%.2f%%' % (x * 100))
        Ser2 = Ser1 / len(df)
    #    print(Ser1,Ser2)
        
        # WOE and IV per bin (dict order matches the sorted bin index)
        woe, iv, iv_sum = cal_WOE_IV(df2, cats, target)
        Ser_woe = pd.DataFrame(list(woe.values()), index=Ser1.index)
        Ser_iv = pd.DataFrame(list(iv.values()), index=Ser1.index)
    
        # positive-class distribution, cut with the same bin edges
        p_df = df2[df2[target] == 1]
        p_cats = pd.cut(p_df[col], bins, duplicates='drop',precision=4,include_lowest=False,)
        Ser3 = (pd.value_counts(p_cats)).sort_index()
        #Ser4 = (Ser3 / sum(Ser1)).apply(lambda x: '%.2f%%' % (x * 100))
        Ser4 = Ser3 / len(df)
    
        # negative-class distribution
        n_df = df2[df2[target] == 0]
        n_cats = pd.cut(n_df[col], bins, duplicates='drop',precision=4, include_lowest=False)
        Ser5 = (pd.value_counts(n_cats)).sort_index()
        #Ser6 = (Ser5 / sum(Ser1)).apply(lambda x: '%.2f%%' % (x * 100))
        Ser6 = Ser5 / len(df)
    
    
        new_df = pd.concat([Ser1, Ser2, Ser3, Ser4, Ser5,Ser6, Ser_woe, Ser_iv], axis=1)
        new_df.columns = ['count', 'all_pct', 'p_count','p_pct','n_count', 'n_pct', 'woe', 'iv', ]
        new_df = new_df.sort_index()
        new_df.to_csv('tmp_df.csv',index=True)  # scratch file: only read back for its index labels
    
        # append the missing-value row
        # NOTE(review): DataFrame.append was removed in pandas 2.0
        add_nan_df = new_df.append(nan_rec, ignore_index=True)
        
        # column-wise totals across bins + missing row
        total = {
            'count':add_nan_df['count'].sum(),
            'all_pct':add_nan_df['all_pct'].sum(),
            
            'p_count':add_nan_df['p_count'].sum(),
            'p_pct':add_nan_df['p_pct'].sum(),
            
            'n_count':add_nan_df['n_count'].sum(),
            'n_pct':add_nan_df['n_pct'].sum(), 
            
            'woe':add_nan_df['woe'].sum(), 
            'iv':add_nan_df['iv'].sum(),}
        # append the totals row
        add_total_df = add_nan_df.append(total,ignore_index=True)
        
        temp_df = pd.read_csv('tmp_df.csv')
    
        # recover the interval labels as strings and re-label the final index
        temp_df_index = temp_df['Unnamed: 0'].values.tolist()
        temp_df_index.append('Nan')
        temp_df_index.append('Total')
        add_total_df.index = temp_df_index
    #    print(add_total_df)
        
        return add_total_df,iv_sum
    
def default_binning(df, target, dist_out_file):
    '''
    Run three binning strategies over every non-target column and export
    each column's distribution tables and IV sums to an Excel report via
    dist_out_helper. Always returns None.
    '''
    data = df.copy()

    # strategies passed to free_binning; 'chi2_max_interval' is the unused alternative
    strategies = ['equal_frequence_binning', 'chi2_min_threshold', 'optimal_binning']

    headings = ['列名', '中文描述']
    for strategy in strategies:
        headings.extend([strategy, '%s_IV' % strategy])

    cols_name = []
    dists = [[], [], []]     # one list of formatted tables per strategy
    iv_sums = [[], [], []]   # one list of IV sums per strategy

    for i in trange(data.shape[1]):  # trange shows a progress bar per column
        col_name = data.columns[i]
        if col_name == target:
            continue
        cols_name.append(col_name)
        for k, strategy in enumerate(strategies):
            dist, sum_iv = free_binning(data, col_name, target, bins=strategy)
            dist = dist_out_helper.format_output(dist)
            dists[k].append(dist.to_string())
            iv_sums[k].append(sum_iv)
    print('All Done!')

    # look up Chinese descriptions for the processed columns
    cn_df = pd.read_excel('data/input_data/cols_desc_V2.1.xlsx')
    cols_cn_desc = cn_df['cn_desc(中文解释)'][cn_df['variable_name(字段名)'].isin(cols_name)].values.tolist()
    dist_out_helper.result2excel(dist_out_file, headings, cols_name, cols_cn_desc,
                                 dists[0], iv_sums[0],
                                 dists[1], iv_sums[1],
                                 dists[2], iv_sums[2])
    print('Write Success!')
    return None
        
        
        
        
    
    
    
    

 









    