# from sklearn.tree import DecisionTreeClassifier
from sklearn import tree
import math
import pandas as pd
import numpy as np
from sklearn import tree
import matplotlib as plt
data = pd.read_csv('UCI_Credit_Card.csv', index_col=[0])
# pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)

# Chinese display names for the columns; the rename call itself is disabled below.
column = {
    'ID': '用户ID',
    'SeriousDlqin2yrs': '好坏客户',
    'RevolvingUtilizationOfUnsecuredLines': '可用额度比值',
    'age': '年龄',
    'NumberOfTime30-59DaysPastDueNotWorse': '逾期30-59天笔数',
    'DebtRatio': '负债率',
    'MonthlyIncome': '月收入',
    'NumberOfOpenCreditLinesAndLoans': '信贷数量',
    'NumberOfTimes90DaysLate': '逾期90天笔数',
    'NumberRealEstateLoansOrLines': '固定资产贷款量',
    'NumberOfTime60-89DaysPastDueNotWorse': '逾期60-89天笔数',
    'NumberOfDependents': '家属数量',
    'DefaultRate': '违约率',
    'GoodDebt': '从未拖欠的信贷数量',
    'RemainingIncome': '每月剩余收入',
    'EstateLoan': '固定资产贷款率',
    'ComsumptionLevel': '每次消费所使用的额度比例',
    'AverageIncome': '家庭成员人均收入',
    'AverageDebtLevel': '人均债务水平',
    'AverageRemainingIncome': '人均剩余收入',
    'CreaditUsedRatio': '个人信贷已用额度比值',
    '30-59DaysRatio': '逾期30-59天笔数占逾期总数的比例',
    '60-89DaysRatio': '逾期60-89天笔数占逾期总数的比例',
    'Over90DaysRatio': '逾期90天笔数占逾期总数的比例',
    'Debt1': '是否负债过度',
    'CreaditRatio': '是否信用额度使用超额',
}
# data.rename(columns=column, inplace=True)
rs1 = 16  # seed for the stratified split (seeds 1, 11 and 16 worked well; 16 best)

print(data.describe())
# print(data.info())

data.drop_duplicates(inplace=True)  # remove duplicated rows


# Feature processing: trim upper outliers (above mean + 3*std), column by
# column in the original order, so each threshold is computed on the frame
# already filtered by the preceding columns.
_outlier_cols = (['LIMIT_BAL']
                 + ['BILL_AMT%d' % k for k in range(1, 7)]
                 + ['PAY_AMT%d' % k for k in range(1, 7)])
for _col in _outlier_cols:
    _upper = data[_col].mean() + 3 * data[_col].std()
    data = data[data[_col] <= _upper]

# SEX and MARRIAGE are not used as model features.
data = data.drop(['SEX', 'MARRIAGE'], axis=1)
# 决策树分箱
def optimal_binning_boundary(x: pd.Series, y: pd.Series, nan: float = -999.) -> list:
    """Derive optimal bin edges for feature *x* via a shallow decision tree.

    A DecisionTreeClassifier is fitted on x alone against the binary target y;
    the split thresholds of its internal nodes become the inner bin edges.

    x: feature series; y: binary target series.
    nan: kept for interface compatibility — currently unused.
    Returns a sorted list of edges: [min(x), *thresholds, max(x) + 0.1].
    """
    feature = x.values.reshape(-1, 1)  # one column, as many rows as samples
    target = y.values

    # Entropy criterion; at most 22 leaves (best value found by experiment),
    # each leaf holding at least 0.5% of the samples.
    clf = tree.DecisionTreeClassifier(criterion='entropy',
                                      max_depth=100,
                                      # max_leaf_nodes=20,
                                      max_leaf_nodes=22,
                                      min_samples_leaf=0.005,
                                      # random_state=rs2
                                      )
    clf.fit(feature, target)
    # print(tree.plot_tree(clf, filled=True))
    # plt.show()

    left = clf.tree_.children_left
    right = clf.tree_.children_right
    thresholds = clf.tree_.threshold

    # Internal (split) nodes are those whose left and right children differ;
    # their thresholds are the candidate bin edges.
    boundary = [t for l, r, t in zip(left, right, thresholds) if l != r]
    print(boundary)
    boundary.sort()

    # +0.1 on the top edge so a later groupby/cut still includes the maximum value.
    edges = [feature.min()] + boundary + [feature.max() + 0.1]
    print(edges)
    return edges
# 信息价值计算
def feature_woe_iv(x: pd.Series, y: pd.Series, column, data) -> pd.DataFrame:
    """Bin feature *column* and compute per-bin WOE and IV values.

    Side effects on *data*: the column's raw values are replaced by integer bin
    labels, and the whole column is dropped when the summed IV is <= -0.05.
    The (possibly modified) frame is returned.

    x, y: feature series and binary target series (0 = good, 1 = bad client).
    column: name of the feature column inside *data*.
    data: the full DataFrame being transformed.
    """
    # x = x.fillna(nan)
    boundary = optimal_binning_boundary(x, y)  # optimal bin-edge list from the decision tree
    print(boundary)
    df = pd.concat([x, y], axis=1)  # merge x and y into one frame for the groupby below
    df.columns = ['x', 'y']  # fixed names for the feature and target columns
    # NOTE(review): this cut uses the default right-closed intervals while the cut
    # for df['bins'] below uses right=False, so values equal to an inner edge fall
    # into different bins in the two frames — confirm this asymmetry is intended.
    data.loc[:, column] = pd.cut(data.loc[:, column], bins=boundary, labels=[i for i in range(len(boundary) - 1)],include_lowest=True)  # replace raw values with integer bin labels
    df['bins'] = pd.cut(x=x, bins=boundary, right=False)  # bin interval of each x value

    grouped = df.groupby('bins')['y']  # per-bin good/bad/total client counts
    result_df = grouped.agg([('good', lambda y: (y == 0).sum()),
                             ('bad', lambda y: (y == 1).sum()),
                             ('total', 'count')])

    result_df['good_pct'] = result_df['good'] / result_df['good'].sum()  # share of good clients
    result_df['bad_pct'] = result_df['bad'] / result_df['bad'].sum()  # share of bad clients
    result_df['total_pct'] = result_df['total'] / result_df['total'].sum()  # share of all clients

    result_df['bad_rate'] = result_df['bad'] / result_df['total']  # bad rate within the bin

    result_df['woe'] = np.log(result_df['good_pct'] / result_df['bad_pct'])  # WOE; +/-inf when a bin has no bad / no good clients
    result_df['iv'] = (result_df['good_pct'] - result_df['bad_pct']) * result_df['woe']  # per-bin IV contribution
    # NOTE(review): each per-bin IV term is mathematically non-negative, so the sum
    # only reaches <= -0.05 when some bin has zero good clients (woe = -inf).
    # Presumably a low-IV cutoff such as <= 0.05 was intended — confirm.
    if result_df['iv'].sum() <= -0.05:
        data = data.drop([column], axis=1)
    print(column + f"该变量IV = {result_df['iv'].sum()}")

    return data

# data.iloc[:, 0] selects the first column; data.iloc[0] selects the first row.

# Move the target column to the front so it sits at position 0.
data.insert(0, 'default.payment.next.month', data.pop('default.payment.next.month'))
# print(data.describe())
# print(data.info())


# Run decision-tree binning and IV computation on every feature column
# (everything after the target); feature_woe_iv hands back the frame with
# the column binned (or dropped when its IV fails the threshold).
for feat in data.iloc[:, 1:].columns:
    data = feature_woe_iv(x=data[feat], y=data['default.payment.next.month'],
                          column=feat, data=data)

data = data.reset_index(drop=True)
# data.to_csv('cs-training1.csv', index=1)


# Train/test split

from sklearn.model_selection import StratifiedShuffleSplit

# Stratified sampling on the target (column 0) keeps the class balance in both
# sets. n_splits=10 produces ten candidate splits; only the split from the
# final loop iteration survives and is written out.
sss = StratifiedShuffleSplit(n_splits=10, test_size=0.3, random_state=rs1)  # 30% test share
for idx_tr, idx_te in sss.split(data.iloc[:, 1:], data.iloc[:, 0]):
    user_train = data.iloc[idx_tr]
    user_train_target = user_train['default.payment.next.month']
    user_test = data.iloc[idx_te]
    user_test_target = user_test['default.payment.next.month']

user_train.to_csv('UCI_Credit_Card_train_DT.csv', index=1)
user_test.to_csv('UCI_Credit_Card_test_DT.csv', index=1)

print(user_train.info())
