# from sklearn.tree import DecisionTreeClassifier
from sklearn import tree
import math
import pandas as pd
import numpy as np
from sklearn import tree
import matplotlib.pyplot as plt
import seaborn as sn
# Load the training set; the first CSV column is used as the row index.
data=pd.read_csv('cs-training.csv',index_col=[0])
#pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns',None)
# English-to-Chinese display names for the raw and engineered columns.
# NOTE(review): currently unused — the rename call below is commented out,
# and this name is later shadowed by the `for column in ...` loop variable.
column={'ID':'用户ID',
        'SeriousDlqin2yrs':'好坏客户',
        'RevolvingUtilizationOfUnsecuredLines':'可用额度比值',
        'age':'年龄',
        'NumberOfTime30-59DaysPastDueNotWorse':'逾期30-59天笔数',
        'DebtRatio':'负债率',
        'MonthlyIncome':'月收入',
        'NumberOfOpenCreditLinesAndLoans':'信贷数量',
        'NumberOfTimes90DaysLate':'逾期90天笔数',
        'NumberRealEstateLoansOrLines':'固定资产贷款量',
        'NumberOfTime60-89DaysPastDueNotWorse':'逾期60-89天笔数',
        'NumberOfDependents':'家属数量',
        'DefaultRate':'违约率',
        'GoodDebt':'从未拖欠的信贷数量',
        'RemainingIncome':'每月剩余收入',
        'EstateLoan':'固定资产贷款率',
        'ComsumptionLevel':'每次消费所使用的额度比例',
        'AverageIncome':'家庭成员人均收入',
        'AverageDebtLevel':'人均债务水平',
        'AverageRemainingIncome':'人均剩余收入',
        'CreaditUsedRatio':'个人信贷已用额度比值',
        '30-59DaysRatio': '逾期30-59天笔数占逾期总数的比例',
        '60-89DaysRatio': '逾期60-89天笔数占逾期总数的比例',
        'Over90DaysRatio': '逾期90天笔数占逾期总数的比例',
        'Debt1':'是否负债过度',
        'CreaditRatio':'是否信用额度使用超额'}
# data.rename(columns=column,inplace=True)
rs1 = 18  # seed for stratified sampling  # performed well: 3, 7, 18 (best)
rs2 = 7 # seed for the random forest  7-0.870423 12-0.869793 18-0.86989
#
# print(data.describe())
# print(data.info())
data.drop_duplicates(inplace=True)    # drop duplicate rows
# --- Outlier handling -------------------------------------------------------
# NumberOfDependents: fill missing values with the median.
# Explicit assignment replaces the original chained
# `data['NumberOfDependents'].fillna(..., inplace=True)`, which raises a
# FutureWarning on modern pandas and silently fails to write back under
# copy-on-write semantics.
data['NumberOfDependents'] = data['NumberOfDependents'].fillna(
    data['NumberOfDependents'].median())
# Upper caps at mean + 5 standard deviations (applied after imputation below).
MonthlyIncome_ = data['MonthlyIncome'].mean() + 5 * data['MonthlyIncome'].std()
# print(MonthlyIncome_)
DebtRatio_ = data['DebtRatio'].mean() + 5 * data['DebtRatio'].std()
RevolvingUtilizationOfUnsecuredLines_ = (
    data['RevolvingUtilizationOfUnsecuredLines'].mean()
    + 5 * data['RevolvingUtilizationOfUnsecuredLines'].std())
# Alternative: mean + 3 standard deviations
'''
MonthlyIncome_=data['MonthlyIncome'].mean()+3*data['MonthlyIncome'].std()
DebtRatio_=data['DebtRatio'].mean()+3*data['DebtRatio'].std()
RevolvingUtilizationOfUnsecuredLines_ = data['RevolvingUtilizationOfUnsecuredLines'].mean()+3*data['RevolvingUtilizationOfUnsecuredLines'].std()
'''
# Hard caps on implausible raw values.
data = data[data['NumberRealEstateLoansOrLines'] <= 40]
data = data[data['NumberOfDependents'] < 20]
data = data[(data['age'] > 0) & (data['age'] < 100)]
data['SeriousDlqin2yrs'] = 1 - data['SeriousDlqin2yrs']  # invert label: 1 = good customer
# Drop rows whose past-due counts are >= 80 — presumably sentinel codes
# (96/98) rather than real counts; TODO confirm against the data dictionary.
# Loop variable renamed from `column` so it no longer shadows the
# `column` rename dict defined above.
for col in ('NumberOfTime30-59DaysPastDueNotWorse',
            'NumberOfTime60-89DaysPastDueNotWorse',
            'NumberOfTimes90DaysLate'):
    data = data[data[col] < 80]

# Keep a pristine copy of the income column before imputation.
# NOTE(review): "mouthlyincome" looks like a typo for "monthlyincome";
# name kept in case later (unseen) code references it.
mouthlyincome = data["MonthlyIncome"].copy()
# --- Missing-value imputation for MonthlyIncome -----------------------------
# Fill with 0
# data.loc[data['MonthlyIncome'].isna(), 'MonthlyIncome'] = 0
# Drop rows with missing values
# data = data[data['MonthlyIncome'].notnull()]
# Fill missing MonthlyIncome with the column mean.
# Uses a single .loc assignment instead of the original chained indexing
# (data['MonthlyIncome'][np.isnan(...)] = ...), which triggers
# SettingWithCopyWarning and can silently fail to modify `data`.
data.loc[data['MonthlyIncome'].isna(), 'MonthlyIncome'] = data['MonthlyIncome'].mean()
# Fill with the median
# data.loc[data['MonthlyIncome'].isna(), 'MonthlyIncome'] = data['MonthlyIncome'].median()

# Random-forest imputation (disabled alternative)
'''
from sklearn.ensemble import RandomForestRegressor as rfr
rfr = rfr(n_estimators=50,
          random_state=rs2
          )
# rfr
rfr.fit(data[data["MonthlyIncome"].notnull()][["age","NumberOfOpenCreditLinesAndLoans","NumberRealEstateLoansOrLines"]], data[data["MonthlyIncome"].notnull()]["MonthlyIncome"])
data.loc[data['MonthlyIncome'].isnull(), 'MonthlyIncome'] = rfr.predict(data[data["MonthlyIncome"].isnull()][["age","NumberOfOpenCreditLinesAndLoans","NumberRealEstateLoansOrLines"]])
# tree.plot_tree(rfr)
# plt.show()
print(data["MonthlyIncome"].describe())
'''

# Apply the mean + 5*std caps computed above (after imputation, so the
# filled values are also subject to the caps).
data = data[data["MonthlyIncome"] <= MonthlyIncome_]
data = data[data["DebtRatio"] <= DebtRatio_]
data = data[data["RevolvingUtilizationOfUnsecuredLines"] <= RevolvingUtilizationOfUnsecuredLines_]
# Log-transform of all feature columns (disabled alternative)
'''
for column in data.iloc[:,1:].columns:
    data[column] = np.log1p(data[column])
    data[column][np.isinf(np.log1p(data[column]))] = 0
'''

# print(data.info())
print(data["MonthlyIncome"].describe())
print(round(data["MonthlyIncome"]))

# Distribution plot of the (rounded) monthly income.
# seaborn.distplot was deprecated in seaborn 0.11 and removed in 0.14;
# histplot(kde=True) is the documented replacement for the original
# hist=True/kde=True call (all other original arguments were defaults/None).
sn.histplot(round(data["MonthlyIncome"]), kde=True)
# #b40003ff-red  #00b406ff-green
'''
sn.kdeplot(round(data["MonthlyIncome"]), shade=True, vertical=False, kernel='gau',
bw='scott', gridsize=100, cut=3, clip=None, legend=True, cumulative=False,
shade_lowest=True, cbar=False, cbar_ax=None, cbar_kws=None, ax=None)
'''
plt.show()

# NOTE(review): the entire section below (decision-tree binning,
# WOE/IV computation, stratified train/test split and CSV export) is
# wrapped in a triple-quoted string, so it is DEAD CODE — none of it
# executes. Kept byte-identical; remove the surrounding quotes (and
# re-test) to re-enable it.
'''
# 决策树分箱
def optimal_binning_boundary(x: pd.Series, y: pd.Series, nan: float = -999.) -> list:

        #利用决策树获得最优分箱的边界值列表
 
    boundary = []  # 待return的分箱边界值列表

    x = x.values  # 填充缺失值
    y = y.values

    clf = tree.DecisionTreeClassifier(criterion='entropy',  # “信息熵”最小化准则划分
                                 max_depth = 100,
                                 # max_leaf_nodes=20 ,
                                 max_leaf_nodes=20 , # 最大叶子节点数
                                 min_samples_leaf=0.005,# 叶子节点样本数量最小占比
                                 # random_state=rs2
                                      )

    clf.fit(x.reshape(-1, 1), y)  # 训练决策树
    #reshape(1,-1)：直接变成了一行未知列了
    #reshape(-1,1)：直接变成了一列未知行了
    # print(tree.plot_tree(clf,filled=True))

    # plt.show()

    n_nodes = clf.tree_.node_count
    children_left = clf.tree_.children_left
    children_right = clf.tree_.children_right
    threshold = clf.tree_.threshold

    for i in range(n_nodes):
        if children_left[i] != children_right[i]:  # 获得决策树节点上的划分边界值
            boundary.append(threshold[i])
    print(boundary)
    boundary.sort()

    min_x = x.min()
    max_x = x.max() + 0.1  # +0.1是为了考虑后续groupby操作时，能包含特征最大值的样本
    boundary = [min_x] + boundary + [max_x]
    print(boundary)
    return boundary
# 信息价值计算
def feature_woe_iv(x: pd.Series, y: pd.Series, column, data) -> pd.DataFrame:

        #计算变量各个分箱的WOE、IV值，返回一个DataFrame

    # x = x.fillna(nan)
    boundary = optimal_binning_boundary(x, y)  # 获得最优分箱边界值列表
    print(boundary)
    df = pd.concat([x, y], axis=1)  # 合并x、y为一个DataFrame，方便后续计算
    df.columns = ['x', 'y']  # 特征变量、目标变量字段的重命名
    data.loc[:, column] = pd.cut(data.loc[:, column], bins=boundary, labels=[i for i in range(len(boundary) - 1)],include_lowest=True)  # 获得每个x值所在的分箱区间
    df['bins'] = pd.cut(x=x, bins=boundary, right=False)  # 获得每个x值所在的分箱区间

    grouped = df.groupby('bins')['y']  # 统计各分箱区间的好、坏、总客户数量
    result_df = grouped.agg([('good', lambda y: (y == 0).sum()),
                             ('bad', lambda y: (y == 1).sum()),
                             ('total', 'count')])

    result_df['good_pct'] = result_df['good'] / result_df['good'].sum()  # 好客户占比
    result_df['bad_pct'] = result_df['bad'] / result_df['bad'].sum()  # 坏客户占比
    result_df['total_pct'] = result_df['total'] / result_df['total'].sum()  # 总客户占比

    result_df['bad_rate'] = result_df['bad'] / result_df['total']  # 坏比率

    result_df['woe'] = np.log(result_df['good_pct'] / result_df['bad_pct'])  # WOE
    result_df['iv'] = (result_df['good_pct'] - result_df['bad_pct']) * result_df['woe']  # IV
    if result_df['iv'].sum() <= -0.05:
        data = data.drop([column], axis=1)
    print(column + f"该变量IV = {result_df['iv'].sum()}")

    return data

# data.iloc[:,0] 选择第一列
# data。iloc[0] 选择第一行

for column in data.iloc[:,1:].columns: #选择data第二列到最后一列 # IV值计算
    data = feature_woe_iv(x=data[column], y=data['SeriousDlqin2yrs'],column =column ,data=data) # 进行决策树分箱和iv值计算
data = data.reset_index(drop=True)
# data.to_csv('cs-training1.csv', index=1)


# 数据集划分

from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=10, test_size=0.3, random_state=rs1) # 0.3
# 根据mnist["target"]来进行分层采样
for train_index, test_index in split.split(data.iloc[:, 1:], data.iloc[:, 0]): # split_split(X,y)
    user_train = data.iloc[train_index]
    user_train_target = user_train['SeriousDlqin2yrs']
    user_test = data.iloc[test_index]
    user_test_target = user_test['SeriousDlqin2yrs']
user_train.to_csv('user_train_DT.csv', index=1)
user_test.to_csv('user_test_DT.csv', index=1)

'''