from sklearn import tree
import math
import pandas as pd
import numpy as np
from sklearn import tree
import matplotlib as plt
# Load the UCI credit-card dataset; the first column (ID) becomes the index.
data=pd.read_csv('UCI_Credit_Card.csv',index_col=[0])
#pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns',None)
# Raw-column-name -> Chinese display-name mapping.
# NOTE(review): currently unused — the rename call below is commented out, and
# most of these keys (e.g. 'SeriousDlqin2yrs', 'MonthlyIncome') belong to the
# "Give Me Some Credit" dataset, not the UCI file loaded above; verify intent.
column={'ID':'用户ID',
        'SeriousDlqin2yrs':'好坏客户',
        'RevolvingUtilizationOfUnsecuredLines':'可用额度比值',
        'age':'年龄',
        'NumberOfTime30-59DaysPastDueNotWorse':'逾期30-59天笔数',
        'DebtRatio':'负债率',
        'MonthlyIncome':'月收入',
        'NumberOfOpenCreditLinesAndLoans':'信贷数量',
        'NumberOfTimes90DaysLate':'逾期90天笔数',
        'NumberRealEstateLoansOrLines':'固定资产贷款量',
        'NumberOfTime60-89DaysPastDueNotWorse':'逾期60-89天笔数',
        'NumberOfDependents':'家属数量',
        'DefaultRate':'违约率',
        'GoodDebt':'从未拖欠的信贷数量',
        'RemainingIncome':'每月剩余收入',
        'EstateLoan':'固定资产贷款率',
        'ComsumptionLevel':'每次消费所使用的额度比例',
        'AverageIncome':'家庭成员人均收入',
        'AverageDebtLevel':'人均债务水平',
        'AverageRemainingIncome':'人均剩余收入',
        'CreaditUsedRatio':'个人信贷已用额度比值',
        '30-59DaysRatio': '逾期30-59天笔数占逾期总数的比例',
        '60-89DaysRatio': '逾期60-89天笔数占逾期总数的比例',
        'Over90DaysRatio': '逾期90天笔数占逾期总数的比例',
        'Debt1':'是否负债过度',
        'CreaditRatio':'是否信用额度使用超额'}
# data.rename(columns=column,inplace=True)
rs = 18  # random seed for the stratified split # seeds 3, 7 and 18 performed well (18 best)
# 4 10 17
# print(data.describe())
# print(data.info())
data.drop_duplicates(inplace=True)    # drop duplicate rows

print(data.describe())



# Feature processing

# Cap LIMIT_BAL at mean + 3*std (three-sigma rule) to drop extreme outliers.
LIMIT_BAL_ =data['LIMIT_BAL'].mean()+3*data['LIMIT_BAL'].std()
data = data[data['LIMIT_BAL'] <= LIMIT_BAL_]

# Drop demographic columns that will not be used as features.
data = data.drop(['SEX'], axis=1)
data = data.drop(['MARRIAGE'], axis=1)

# Dataset summary
print(data.info())
print(data.describe())
# print(data['MonthlyIncome'].isnull())
# print(data['NumberOfDependents'].isnull())

def cal_Chi2(df):
    """Compute the chi-square statistic of the contingency table in *df*.

    Each cell contributes (observed - expected)**2 / expected, where the
    expected count assumes independence of rows and columns.

    The marginal row/column sums are computed once up front; the original
    recomputed both marginals inside the per-cell loop, costing
    O(r*c*(r+c)) instead of O(r*c).

    Assumes no all-zero row or column (callers merge those away first);
    otherwise the division produces inf/nan, matching the original behaviour.
    """
    obs = df.values.astype(float)
    total = obs.sum()
    row_tot = obs.sum(axis=1)   # marginal sums, hoisted out of the loop
    col_tot = obs.sum(axis=0)
    # Outer product of the marginals, scaled by the grand total.
    expected = row_tot[:, None] * col_tot[None, :] / total
    return float(((obs - expected) ** 2 / expected).sum())


def line_merge(df, i, j):
    """Fold row *j* into row *i* and return the table without row *j*.

    The positive (col 1) and negative (col 2) counts of row *j* are added
    into row *i*, and row *i* takes row *j*'s boundary value (col 0).
    Note that *df* itself is mutated; the returned frame keeps the original
    index labels of the surviving rows.
    """
    df.iloc[i, 1] += df.iloc[j, 1]
    df.iloc[i, 2] += df.iloc[j, 2]
    df.iloc[i, 0] = df.iloc[j, 0]
    survivors = [k for k in range(len(df)) if k != j]
    return df.iloc[survivors]


# 定义一个卡方分箱（可设置参数置信度水平与箱的个数）停止条件为大于置信水平且小于bin的数目(confidenceVal-置信度，bin-最大分箱数）
def ChiMerge(df, variable, flag='default.payment.next.month', confidenceVal=9.542, bin=22): # bin=8左右为无零区 bin=20最好
    '''
    df:传入一个数据框仅包含一个需要卡方分箱的变量与正负样本标识（正样本为1，负样本为0）
    variable:需要卡方分箱的变量名称（字符串）
    flag：正负样本标识的名称（字符串）
    confidenceVal：置信度水平（默认是不进行抽样95%）
    bin：最多箱的数目
    '''
    x1 = data[variable].values
    boundary = []
    # 进行数据格式化录入
    regroup = df.groupby([variable])[flag].agg(["size", "sum"])
    regroup.columns = ['total_num', 'positive_class']
    regroup['negative_class'] = regroup['total_num'] - regroup['positive_class']  # 统计需分箱变量每个值负样本数
    regroup = regroup.drop('total_num', axis=1).reset_index()
    col_names = regroup.columns

    print('已完成数据读入,正在计算数据初处理')

    # 处理连续没有正样本或负样本的区间，并进行区间的合并（以免卡方值计算报错）
    i = 0
    while (i <= regroup.shape[0] - 2):
        # 如果正样本(1)列或负样本(2)列的数量求和等于0 (求和等于0,说明i和i+1行的值都等于0)
        if sum(regroup.iloc[[i, i + 1], [1, 2]].sum() == 0) > 0:
            # 合并两个区间
            regroup = line_merge(regroup, i, i + 1)
            i = i - 1
        i = i + 1

        # 对相邻两个区间进行卡方值计算
    chi_ls = []  # 创建一个数组保存相邻两个区间的卡方值
    for i in np.arange(regroup.shape[0] - 1):
        chi = cal_Chi2(regroup.iloc[[i, i + 1], [1, 2]])
        chi_ls.append(chi)

    print('已完成数据初处理，正在进行卡方分箱核心操作')

    # 把卡方值最小的两个区间进行合并（卡方分箱核心）
    while True:
        if (len(chi_ls) <= (bin - 1) and min(chi_ls) >= confidenceVal):
            break

        min_ind = chi_ls.index(min(chi_ls))  # 找出卡方值最小的位置索引
        #       合并两个区间
        regroup = line_merge(regroup, min_ind, min_ind + 1)

        if (min_ind == regroup.shape[0] - 1):  # 最小值是最后两个区间的时候
            # 计算合并后当前区间与前一个区间的卡方值并替换
            chi_ls[min_ind - 1] = cal_Chi2(regroup.iloc[[min_ind, min_ind - 1], [1, 2]])
            # 删除替换前的卡方值
            del chi_ls[min_ind]

        else:
            # 计算合并后当前区间与前一个区间的卡方值并替换
            chi_ls[min_ind - 1] = cal_Chi2(regroup.iloc[[min_ind, min_ind - 1], [1, 2]])

            # 计算合并后当前区间与后一个区间的卡方值并替换
            chi_ls[min_ind] = cal_Chi2(regroup.iloc[[min_ind, min_ind + 1], [1, 2]])

            # 删除替换前的卡方值
            del chi_ls[min_ind + 1]

    print('已完成卡方分箱核心操作，正在保存结果')

    # 把结果保存成一个数据框

    list_temp = []
    for i in np.arange(regroup.shape[0]):
        if i == 0:
            x = '-inf' + '~' + str(regroup.iloc[i, 0])
        elif i == regroup.shape[0] - 1:
            x = str(regroup.iloc[i - 1, 0]) + '+'
        else:
            x = str(regroup.iloc[i - 1, 0]) + '~' + str(regroup.iloc[i, 0])
        list_temp.append(x)
    regroup[variable] = list_temp  # 结果表第二列：区间
    print(regroup)
    # print(type(regroup))
    # print(regroup[variable])
    # print(list(regroup[variable][:-1]))
    for i in list(regroup[variable])[:-1]:
        boundary.append(eval(i.split('~')[1]))
    # print(boundary)

    min_x = x1.min()
    max_x = x1.max() + 0.1  # +0.1是为了考虑后续groupby操作时，能包含特征最大值的样本
    boundary = [min_x] + boundary + [max_x]
    # print(boundary)
    boundary = list(set(boundary))
    boundary.sort()
    print(boundary)
    # return regroup
    return boundary
# Information value (IV) computation
def feature_woe_iv(x: pd.Series, y: pd.Series, column, data) -> pd.DataFrame:
    """Chi-merge-bin one feature, compute per-bin WOE and IV, return `data`.

    x:      the feature column (passed as data[column] by the caller)
    y:      binary target Series (1 = bad client, 0 = good client)
    column: name of the feature column being processed
    data:   full DataFrame; its `column` values are replaced by bin codes,
            and the column may be dropped when total IV is very low
    """
    # x = x.fillna(nan)
    print(column)
    boundary = ChiMerge(df=data,variable=column) # optimal bin-edge list for this feature
    df = pd.concat([x, y], axis=1)  # combine x and y into one DataFrame for the stats below
    df.columns = ['x', 'y']  # fixed names for the feature / target fields
    # Replace the raw feature values in `data` with integer bin codes.
    # NOTE(review): this mutates data[column] BEFORE df['bins'] is computed
    # below from `x`; presumably `x` is an independent Series rather than a
    # view of data[column] — verify, or the second cut sees bin codes.
    # NOTE(review): this cut uses right-closed intervals (include_lowest),
    # the one below uses right=False (left-closed) — confirm the mismatch
    # in interval closure is intended.
    data.loc[:, column] = pd.cut(data.loc[:, column], bins=boundary, labels=[i for i in range(len(boundary) - 1)],include_lowest=True)  # bin interval for each value
    df['bins'] = pd.cut(x=x, bins=boundary, right=False)  # bin interval for each x value

    grouped = df.groupby('bins')['y']  # per-bin good / bad / total client counts
    result_df = grouped.agg([('good', lambda y: (y == 0).sum()),
                             ('bad', lambda y: (y == 1).sum()),
                             ('total', 'count')])

    result_df['good_pct'] = result_df['good'] / result_df['good'].sum()  # share of good clients
    result_df['bad_pct'] = result_df['bad'] / result_df['bad'].sum()  # share of bad clients
    result_df['total_pct'] = result_df['total'] / result_df['total'].sum()  # share of all clients

    result_df['bad_rate'] = result_df['bad'] / result_df['total']  # bad rate within the bin

    # NOTE(review): np.log yields +/-inf for bins with zero good or zero bad clients.
    result_df['woe'] = np.log(result_df['good_pct'] / result_df['bad_pct'])  # WOE
    result_df['iv'] = (result_df['good_pct'] - result_df['bad_pct']) * result_df['woe']  # IV
    # Drop the feature when its total IV is below the threshold.
    # NOTE(review): each finite IV term is >= 0 mathematically, so a sum
    # <= -0.05 can only arise through inf/NaN arithmetic — confirm this
    # branch is reachable/intended.
    if result_df['iv'].sum() <= -0.05:
        data = data.drop([column], axis=1)
    print(column + f"该变量IV = {result_df['iv'].sum()}")

    return data

# data.iloc[:, 0] selects the first column
# data.iloc[0] selects the first row
# Move the target column to position 0 so data.iloc[:, 1:] below is features-only.
data.insert(0, 'default.payment.next.month', data.pop('default.payment.next.month'))

# NOTE(review): the column list is snapshotted before the loop; if
# feature_woe_iv ever dropped a column, the later data[column] lookup would
# raise KeyError — presumably the drop branch never fires here; verify.
for column in data.iloc[:,1:].columns: # iterate over the 2nd..last columns # IV computation
    data = feature_woe_iv(x=data[column], y=data['default.payment.next.month'],column =column ,data=data) # chi-square binning and IV computation
data = data.reset_index(drop=True)
# data.to_csv('cs-training1.csv', index=1)


# Train / test split

from sklearn.model_selection import StratifiedShuffleSplit
# NOTE(review): n_splits=10 but the loop below overwrites its variables on
# every pass, so only the last of the ten splits is actually kept.
split = StratifiedShuffleSplit(n_splits=10, test_size=0.3, random_state=rs) # 0.2 0.3
# Stratified sampling keyed on the target column (column 0).
for train_index, test_index in split.split(data.iloc[:, 1:], data.iloc[:, 0]): # split(X, y)
    user_train = data.iloc[train_index]
    user_train_target = user_train['default.payment.next.month']
    user_test = data.iloc[test_index]
    user_test_target = user_test['default.payment.next.month']
user_train.to_csv('UCI_Credit_Card_train_chi.csv', index=1)
user_test.to_csv('UCI_Credit_Card_test_chi.csv', index=1)