import pandas as pd
import numpy as np
from scipy.stats import chi2_contingency
from sklearn.linear_model import LogisticRegression as LR
import scikitplot as skplt
from sklearn import metrics


def qcut(data, col, num_bins=20, bins=None, label='SeriousDlqin2yrs'):
    '''
    Bin one column and count good/bad samples per bin.

    input:
        data, DataFrame; a helper column 'qcut' is added to it in place
        col, name of the column to bin
        num_bins, number of equal-frequency bins (default 20)
        bins, optional explicit bin edges; when given, overrides num_bins
        label, name of the binary (0/1) target column
               (default 'SeriousDlqin2yrs' for backward compatibility)
    return:
        bins_array: list of tuples (low_edge, up_edge, count_0, count_1)
    '''
    # No explicit edges supplied -> equal-frequency binning.
    # duplicates='drop' avoids an error when quantile edges coincide.
    if not bins:
        data['qcut'], bin_edges = pd.qcut(data[col], num_bins, retbins=True,
                                          duplicates='drop')
    else:
        data['qcut'], bin_edges = pd.cut(data[col], bins, retbins=True)

    # Per bin: sum of the 0/1 label = number of positives, count = total.
    grouped = data.groupby('qcut')[label].agg(['sum', 'count'])
    grouped.columns = ['count_1', 'total']
    grouped['count_0'] = grouped['total'] - grouped['count_1']

    # Pair consecutive edges with the per-bin good/bad counts.
    bins_array = [*zip(bin_edges, bin_edges[1:], grouped.count_0, grouped.count_1)]

    return bins_array

def cal_woe_iv(bins_array):
    '''
    Compute the weight of evidence (WOE) for each bin and the column's
    information value (IV).

    input:
        bins_array: list of (low, up, count_0, count_1) tuples produced
                    by the binning step
    return:
        (df_bins, iv): per-bin statistics DataFrame and the scalar IV
    '''
    frame = pd.DataFrame(bins_array, columns=['low', 'up', 'count_0', 'count_1'])
    goods = frame['count_0']
    bads = frame['count_1']
    frame['total'] = goods + bads
    frame['percentage'] = frame['total'] / frame['total'].sum()
    # Fraction of bad samples inside each bin.
    frame['bad_rate'] = bads / frame['total']
    # Each bin's share of all bads / all goods in the dataset.
    frame['bad%'] = bads / bads.sum()
    frame['good%'] = goods / goods.sum()
    frame['woe'] = np.log(frame['good%'] / frame['bad%'])
    # IV = sum over bins of (good share - bad share) * WOE.
    frame['good-bad'] = frame['good%'] - frame['bad%']
    iv = (frame['good-bad'] * frame['woe']).sum()
    return frame, iv

def get_binsdf(data, col, n):
    '''
    Chi-square-merge a column's equal-frequency bins down to n bins and
    return the per-bin WOE statistics.

    input:
        data: DataFrame containing `col` and the SeriousDlqin2yrs label
              (label name is hard-coded inside qcut)
        col: column to bin
        n: target number of bins after merging
    return:
        bins_df: DataFrame from cal_woe_iv (low/up edges, counts, WOE)
    '''
    #bins_array [(low bounday, up boundary, # of 0, # of 1)]
    bins_array = qcut(data, col, 20)
    # Ensure every bin contains both classes: a zero count_0 or count_1
    # would make the WOE log undefined later.
    # NOTE(review): this outer `i` is shadowed by the inner `for i in ...`
    # below; it only bounds the number of merge passes (at most 20).
    for i in range(20):
        # While the FIRST bin is missing a class, merge it with the bin
        # that follows it (merge downward).
        while 0 in bins_array[0][2:]:
            bins_array[0] = (
                    bins_array[0][0],#low boundary of bin 0
                    bins_array[1][1],#up boundary taken from bin 1
                    bins_array[0][2] + bins_array[1][2], #merged count of 0s
                    bins_array[0][3] + bins_array[1][3] #merged count of 1s
                    )
            # drop the bin that was absorbed into bin 0
            bins_array.pop(1)
    
        for i in range(len(bins_array)):
            if 0 in bins_array[i][2:]:
                # Merge upward into the previous bin (i-1); bin 0 is already
                # guaranteed clean by the while-loop above, so i-1 >= 0 when
                # this branch fires.
                bins_array[i-1] = (
                    bins_array[i-1][0],#low boundary of bin i-1
                    bins_array[i][1],#up boundary of bin i
                    bins_array[i-1][2] + bins_array[i][2], #merged count of 0s
                    bins_array[i-1][3] + bins_array[i][3] #merged count of 1s
                    )
                # remove the absorbed bin, then rescan from the start
                bins_array.pop(i)
                break
        # for/else: no bin needed merging, so stop the outer passes early
        else:
            break
    
    # Merge the pair of adjacent bins whose (count_0, count_1) distributions
    # are least distinguishable (highest chi-square p-value) until only n
    # bins remain.
    while len(bins_array) > n:
        p_value_list = []
        for i in range(len(bins_array) - 1):
            # chi-square test on the (count_0, count_1) pairs of two
            # neighbouring bins
            x1 = bins_array[i][-2:]
            x2 = bins_array[i+1][-2:]
            p_value = chi2_contingency([x1,x2])[1]
            p_value_list.append(p_value)
        i = np.argmax(p_value_list)
        # merge bin i with bin i+1
        bins_array[i] = (
                bins_array[i][0],#lower edge of bin i
                bins_array[i+1][1],#upper edge of the next bin
                bins_array[i][2] + bins_array[i+1][2],
                bins_array[i][3] + bins_array[i+1][3]
                )
        bins_array.pop(i+1)
    
    bins_df, iv = cal_woe_iv(bins_array)
        
    return bins_df


def get_woe(df, col, label, bins):
    '''
    Compute the WOE value of every bin of one column.

    input:
        df: dataset; a helper column 'cut' is added to it in place
        col: column to bin
        label: name of the binary (0/1) target column
        bins: bin edges to cut the column with
    return:
        Series of WOE values indexed by bin interval
    '''
    df['cut'] = pd.cut(df[col], bins)
    # Rows = bins, columns = label values 0/1, cells = counts.
    counts = df.groupby('cut')[label].value_counts().unstack()
    # Each bin's share of all bads / all goods.
    bad_share = counts[1] / counts[1].sum()
    good_share = counts[0] / counts[0].sum()
    woe = np.log(good_share / bad_share)
    woe.name = 'woe'
    return woe
    

if __name__ == '__main__':
    # NOTE(review): column 0 of both CSVs is used as the label below
    # (y_train/y_test), presumably SeriousDlqin2yrs — confirm file layout.
    model_set = pd.read_csv('model_set.csv')
    test_set = pd.read_csv('test_set.csv')
    
    # Features binned automatically (chi-square merge) -> target bin count.
    auto_bins = {"RevolvingUtilizationOfUnsecuredLines":6,
                 "age":5,
                 "DebtRatio":3,
                 "MonthlyIncome":3,
                 "NumberOfOpenCreditLinesAndLoans":5}
    # Features with hand-picked bin edges.
    manual_bins = {"NumberOfTime30-59DaysPastDueNotWorse":[0, 1, 2, 13]
                 ,"NumberOfTimes90DaysLate":[0, 1, 2, 17]
                 ,"NumberRealEstateLoansOrLines":[0, 1, 2, 4, 54]
                 ,"NumberOfTime60-89DaysPastDueNotWorse":[0, 1, 2, 8]
                 ,"NumberOfDependents":[0, 1, 2, 3]}
    # Prepend -inf and replace the last edge with +inf so out-of-range
    # values in new data still fall into a bin.
    manual_bins = {k : [-np.inf, *v[:-1], np.inf] for k, v in manual_bins.items()}

    bins_of_column = {}
    # Collect the lower/upper edges produced by automatic binning.
    for k in auto_bins.keys():
        df = get_binsdf(model_set, k, auto_bins[k])
        bin_edge = sorted(set(df.low).union(set(df.up)))
        # Widen the outermost edges to +/-inf: the test set / new data may
        # fall outside the training-set bin boundaries.
        bin_edge[0], bin_edge[-1] = -np.inf, np.inf
        bins_of_column[k] = bin_edge
    
    # Bin edges for all features (automatic + manual).
    bins_of_column.update(manual_bins)

    # WOE value per bin, per feature.
    woe_dict = {}
    for k in bins_of_column.keys():
        woe_dict[k] = get_woe(model_set, k, 'SeriousDlqin2yrs', bins_of_column[k])
    # Replace each raw value with the WOE of the bin it falls into.
    for k in bins_of_column.keys():
        model_set[k] = pd.cut(model_set[k], bins_of_column[k]).map(woe_dict[k])
        test_set[k] = pd.cut(test_set[k], bins_of_column[k]).map(woe_dict[k])
    
    col = ['RevolvingUtilizationOfUnsecuredLines', 'age',
       'NumberOfTime30-59DaysPastDueNotWorse', 'DebtRatio', 'MonthlyIncome',
       'NumberOfOpenCreditLinesAndLoans', 'NumberOfTimes90DaysLate',
       'NumberRealEstateLoansOrLines', 'NumberOfTime60-89DaysPastDueNotWorse',
       'NumberOfDependents']
    
    X_train = model_set[col]
    # Label taken from column 0 of the CSV.
    y_train = model_set.iloc[:,0]
    X_test = test_set[col]
    y_test = test_set.iloc[:, 0]
    # Fit a logistic regression on the WOE-encoded features.
    lr = LR(C=.9).fit(X_train, y_train)
    
    # ROC curve on the held-out test set.
    test_prob_df = pd.DataFrame(lr.predict_proba(X_test))
    skplt.metrics.plot_roc(y_test, test_prob_df, plot_macro=True, plot_micro=True)
    
    y_pre = lr.predict(X_test)
    
    # NOTE(review): these metric results are computed but neither stored nor
    # printed — presumably run interactively; print them for script use.
    metrics.confusion_matrix(y_test, y_pre)
    metrics.roc_auc_score(y_test, y_pre)
    
    
    #### Grid search over the regularization strength C.
    from sklearn.model_selection import GridSearchCV
    estimator = LR()
    parameters = {'C':np.arange(.6, 1.2, .1)}
    gSearch = GridSearchCV(estimator, parameters)
    gSearch.fit(X_train, y_train)