# from sklearn.tree import DecisionTreeClassifier
from sklearn import tree
import math
import pandas as pd
import numpy as np
from sklearn import tree
import matplotlib.pyplot as plt
import seaborn as sn
# Load the "Give Me Some Credit" training set; the first CSV column is the row id.
data=pd.read_csv('cs-training.csv',index_col=[0])
#pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns',None)
# Mapping from raw dataset column names to human-readable labels.
# NOTE(review): only consumed by the commented-out rename below, so currently
# dead code; the name `column` is also shadowed later in the script by loop
# variables of the same name.
column={'ID':'User ID',
        'SeriousDlqin2yrs':'Good and Bad Customers',
        'RevolvingUtilizationOfUnsecuredLines':'Ratio of available credits',
        'age':'Age',
        'NumberOfTimes':'Number Of Times',
        'NumberOfTime30-59DaysPastDueNotWorse':'Number Of Time 30-59 Days Past Due ',
        'DebtRatio':'DebtRatio',
        'MonthlyIncome':'Monthly Income',
        'NumberOfOpenCreditLinesAndLoans':'Number Of Credits',
        'NumberOfTimes90DaysLate':'Number of 90 days past due loans',
        'NumberRealEstate':'Number of loans',
        'NumberRealEstateLoansOrLines':'Fixed Asset Loan Volume',
        'NumberOfTime60-89DaysPastDueNotWorse':'Number Of Time 60-89 Days Past Due',
        'NumberOfDependents':'Number Of family members',
        'DefaultRate':'Default rate',
        'GoodDebt':'Number of credits never defaulted',
        'RemainingIncome':'Monthly Remaining Income',
        'EstateLoan':'Fixed Asset Loan Ratio',
        'Comsumption Loan': 'Level of debt per capita',
        'AverageIncome':'Average income per household member',
        'AverageDebtLevel': 'Number of debt per household member',
        'CreaditUsedRatio':'Personal Credit Used Ratio',
        '30-59DaysRatio': 'Number of 30-59 day past due loans as a proportion of total past due',
        '60-89DaysRatio':  'Number of 60-89 day past due loans as a proportion of total past due',
        'Debt1': 'Whether over-indebted',
        'CreaditRatio':'Whether credit limit is over utilized'}
# data.rename(columns=column,inplace=True)
# Random seeds used below (rs1: train/test split, rs2: commented-out models).
rs1 = 18
rs2 = 7
#
# print(data.describe())
# print(data.info())
# Remove fully duplicated rows before any statistics are computed.
data.drop_duplicates(inplace=True)
'''
Outlier handling
'''
# Fill missing dependent counts with the column median.  Plain assignment is
# used instead of chained `data[col].fillna(..., inplace=True)`, which is
# deprecated and a silent no-op under pandas copy-on-write.
data['NumberOfDependents'] = data['NumberOfDependents'].fillna(data['NumberOfDependents'].median())
# Upper caps for the heavy-tailed columns: mean + 5 standard deviations
# (applied further down, after income imputation).
MonthlyIncome_ = data['MonthlyIncome'].mean() + 5 * data['MonthlyIncome'].std()
# print(MonthlyIncome_)
DebtRatio_ = data['DebtRatio'].mean() + 5 * data['DebtRatio'].std()
RevolvingUtilizationOfUnsecuredLines_ = data['RevolvingUtilizationOfUnsecuredLines'].mean() + 5 * data['RevolvingUtilizationOfUnsecuredLines'].std()
# threefold standard deviation (stricter alternative, kept for reference)
'''
MonthlyIncome_=data['MonthlyIncome'].mean()+3*data['MonthlyIncome'].std()
DebtRatio_=data['DebtRatio'].mean()+3*data['DebtRatio'].std()
RevolvingUtilizationOfUnsecuredLines_ = data['RevolvingUtilizationOfUnsecuredLines'].mean()+3*data['RevolvingUtilizationOfUnsecuredLines'].std()
'''
# Drop implausible records: extreme loan/dependent counts and out-of-range ages.
data = data[data['NumberRealEstateLoansOrLines'] <= 40]
data = data[(data['NumberOfDependents'] < 20)]
data = data[(data['age'] > 0) & (data['age'] < 100)]
# Flip the label so that 1 = good customer (never seriously delinquent).
data['SeriousDlqin2yrs'] = 1 - data['SeriousDlqin2yrs']
# Past-due counts of 96/98 are sentinel codes in this dataset; drop them.
# (Loop variable renamed so it no longer shadows the `column` dict above.)
for past_due_col in ['NumberOfTime30-59DaysPastDueNotWorse', 'NumberOfTime60-89DaysPastDueNotWorse',
                     'NumberOfTimes90DaysLate']:
    data = data[data.loc[:, past_due_col] < 80]
# Keep a copy of the raw (pre-imputation) incomes.  (sic: "mouthly")
mouthlyincome = data["MonthlyIncome"].copy()
'''
Completion of missing values
'''

# data['MonthlyIncome'][np.isnan(data['MonthlyIncome'])] = 0
## Delete data with missing values
# data = data[data['MonthlyIncome'].notnull()]
## complete data with mean values
# Impute missing MonthlyIncome with the column mean.  A .loc boolean-mask
# assignment replaces the original chained assignment
# (data['MonthlyIncome'][np.isnan(...)] = ...), which triggers
# SettingWithCopyWarning and silently does nothing under pandas copy-on-write.
data.loc[data['MonthlyIncome'].isna(), 'MonthlyIncome'] = data['MonthlyIncome'].mean()
# complete data with median values
# data.loc[data['MonthlyIncome'].isna(), 'MonthlyIncome'] = data['MonthlyIncome'].median()

# random forest complete values
'''
from sklearn.ensemble import RandomForestRegressor as rfr
rfr = rfr(n_estimators=50,
          random_state=rs2
          )
# rfr
rfr.fit(data[data["MonthlyIncome"].notnull()][["age","NumberOfOpenCreditLinesAndLoans","NumberRealEstateLoansOrLines"]], data[data["MonthlyIncome"].notnull()]["MonthlyIncome"])
data.loc[data['MonthlyIncome'].isnull(), 'MonthlyIncome'] = rfr.predict(data[data["MonthlyIncome"].isnull()][["age","NumberOfOpenCreditLinesAndLoans","NumberRealEstateLoansOrLines"]])
# tree.plot_tree(rfr)
# plt.show()
print(data["MonthlyIncome"].describe())
'''

# Apply the 5-sigma caps computed earlier.
data = data[data["MonthlyIncome"] <= MonthlyIncome_]
data = data[data["DebtRatio"] <= DebtRatio_]
data = data[data["RevolvingUtilizationOfUnsecuredLines"] <= RevolvingUtilizationOfUnsecuredLines_]
'''
for column in data.iloc[:,1:].columns:
    data[column] = np.log1p(data[column])
    data[column][np.isinf(np.log1p(data[column]))] = 0
'''

# print(data.info())
print(data["MonthlyIncome"].describe())
print(round(data["MonthlyIncome"]))

# Visual sanity check of the income distribution.  All explicitly-passed
# default keyword arguments have been dropped — the call is equivalent.
# NOTE(review): sn.distplot is deprecated and removed in seaborn >= 0.14;
# switch to sn.histplot(..., kde=True) when upgrading.
sn.distplot(round(data["MonthlyIncome"]))
# #b40003ff-red #00b406ff-green
'''
sn.kdeplot(round(data["MonthlyIncome"]), shade=True, vertical=False, kernel='gau',
bw='scott', gridsize=100, cut=3, clip=None, legend=True, cumulative=False,
shade_lowest=True, cbar=False, cbar_ax=None, cbar_kws=None, ax=None)
'''
plt.show()


# decision tree binning
def optimal_binning_boundary(x: pd.Series, y: pd.Series, nan: float = -999.) -> list:
    """Return sorted bin boundaries for feature ``x`` learned by a decision tree.

    A shallow entropy-criterion tree is fitted on ``x`` alone against the binary
    target ``y``; every internal split threshold becomes a bin edge.  The
    feature minimum and maximum (+0.1, so the maximum value still falls inside
    the last right-open bin) are added as the outer edges.

    :param x: feature values; must not contain NaN (fill/drop before calling)
    :param y: binary target aligned with ``x``
    :param nan: unused — kept only for backward compatibility with callers
    :return: ascending list of edges, length = number_of_bins + 1
    """
    boundary = []

    x = x.values  # work on the raw numpy arrays
    y = y.values

    clf = tree.DecisionTreeClassifier(criterion='entropy',
                                      max_depth=100,
                                      max_leaf_nodes=20,   # at most 20 bins
                                      min_samples_leaf=0.005,  # each bin >= 0.5% of rows
                                      # random_state=rs2
                                      )

    clf.fit(x.reshape(-1, 1), y)  # train decision tree on the single feature
    # print(tree.plot_tree(clf,filled=True))
    # plt.show()

    # Internal nodes are exactly those whose left and right children differ;
    # their thresholds are the learned split points.
    children_left = clf.tree_.children_left
    children_right = clf.tree_.children_right
    threshold = clf.tree_.threshold
    for i in range(clf.tree_.node_count):
        if children_left[i] != children_right[i]:
            boundary.append(threshold[i])
    print(boundary)
    boundary.sort()

    min_x = x.min()
    max_x = x.max() + 0.1  # +0.1 keeps the max inside the last bin with right=False
    boundary = [min_x] + boundary + [max_x]
    print(boundary)
    return boundary
# calculate IV
def feature_woe_iv(x: pd.Series, y: pd.Series, column: str, data: pd.DataFrame) -> pd.DataFrame:
    """Bin feature ``column`` in ``data`` by decision-tree boundaries, print its
    WOE/IV table summary, and return ``data`` (with the column replaced by bin
    labels, or dropped when the IV test below fires)."""

    # x = x.fillna(nan)
    boundary = optimal_binning_boundary(x, y)  # boundary values for the optimal bins
    print(boundary)
    # Working frame: feature values and target side by side.
    df = pd.concat([x, y], axis=1)
    df.columns = ['x', 'y']
    # Replace the raw column in `data` with integer bin labels 0..n_bins-1.
    # NOTE(review): this cut is right-closed with include_lowest, while the
    # `df['bins']` cut below is right-open (right=False) — values exactly on a
    # boundary land in different bins in the two cuts; confirm this is intended.
    data.loc[:, column] = pd.cut(data.loc[:, column], bins=boundary, labels=[i for i in range(len(boundary) - 1)],include_lowest=True)
    df['bins'] = pd.cut(x=x, bins=boundary, right=False)

    grouped = df.groupby('bins')['y']  # Counting the number of clients
    # Per-bin counts: y == 0 is "good" here because the label was flipped
    # earlier in the script (1 = good customer).
    result_df = grouped.agg([('good', lambda y: (y == 0).sum()),
                             ('bad', lambda y: (y == 1).sum()),
                             ('total', 'count')])

    result_df['good_pct'] = result_df['good'] / result_df['good'].sum()  # Percentage of customers not in default
    result_df['bad_pct'] = result_df['bad'] / result_df['bad'].sum()  # Percentage of customers  in default
    result_df['total_pct'] = result_df['total'] / result_df['total'].sum()

    result_df['bad_rate'] = result_df['bad'] / result_df['total']  # default rate

    result_df['woe'] = np.log(result_df['good_pct'] / result_df['bad_pct'])  # WOE
    result_df['iv'] = (result_df['good_pct'] - result_df['bad_pct']) * result_df['woe']  # IV
    # NOTE(review): each IV term (good_pct - bad_pct) * log(good_pct/bad_pct)
    # is non-negative, so the sum can never be <= -0.05 for finite values —
    # this drop appears unreachable; perhaps the threshold was meant to be a
    # positive low-IV cutoff (e.g. <= 0.05)?  TODO confirm before changing.
    if result_df['iv'].sum() <= -0.05:
        data = data.drop([column], axis=1)
    print(column + f" IV = {result_df['iv'].sum()}")

    return data

# data.iloc[:,0]
# data.iloc[0]

# Bin every feature (all columns except the label) and drop any rejected by
# the IV check inside feature_woe_iv.
for column in data.iloc[:, 1:].columns:  # calculate IV
    data = feature_woe_iv(x=data[column], y=data['SeriousDlqin2yrs'], column=column, data=data)
data = data.reset_index(drop=True)
# data.to_csv('cs-training1.csv', index=True)


# Dataset segmentation: stratified 70/30 train/test split on the label.

from sklearn.model_selection import StratifiedShuffleSplit
# A single stratified split suffices: the original used n_splits=10 but the
# loop below overwrote its variables each iteration, keeping only the last
# pair — nine splits of wasted work.
split = StratifiedShuffleSplit(n_splits=1, test_size=0.3, random_state=rs1)  # 0.3 test fraction
# Stratified on the first column (the SeriousDlqin2yrs label).
for train_index, test_index in split.split(data.iloc[:, 1:], data.iloc[:, 0]):  # split(X, y)
    user_train = data.iloc[train_index]
    user_train_target = user_train['SeriousDlqin2yrs']
    user_test = data.iloc[test_index]
    user_test_target = user_test['SeriousDlqin2yrs']
user_train.to_csv('user_train_DT.csv', index=True)
user_test.to_csv('user_test_DT.csv', index=True)