from sklearn.tree import DecisionTreeClassifier
import math
import pandas as pd
import numpy as np

from  sklearn.impute import SimpleImputer
from sklearn.preprocessing import OrdinalEncoder
import warnings

warnings.filterwarnings('ignore')


# Load the training data; the CSV's first column becomes the DataFrame index.
data=pd.read_csv('trainL.csv',index_col=[0])
#pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns',None)
# Column-name -> Chinese description (+ measured IV score) lookup table.
# NOTE(review): most values rely on implicit string concatenation — e.g. 'id'
# maps to one fused string because the comma between the description and the
# 'IV = ...' part is missing. Harmless today since the rename below is
# commented out, but confirm before ever using `column` for real.
column={   'id':'贷款清单分配的唯一信用证标识''IV = 0.006370573642305401',
            'isDefault':'负债与否',
            'loanAmnt' :'贷款金额''IV = 0.04664715537296771',
            'term' :'贷款期限（year）''IV = 0.17597614797972133',
            'interestRate' :'贷款利率''IV = 0.4798316759738729',
            'installment':'分期付款金额''IV = 0.0543541457615794',
            'grade': '贷款等级''IV = 0.45622028149990296',
            'subGrade': '贷款等级之子级''IV = 0.49677706118727905',
            'employmentTitle' :'就业职称''IV = 0.0376059295380157',
            'employmentLength' :'就业年限（年）''IV = 0.0009165181175337849',
            'homeOwnership' :'借款人在登记时提供的房屋所有权状况''IV = 0.03272527106855664',
            'annualIncome': '年收入''IV = 0.036598669985527695',
            'verificationStatus': '验证状态''IV = 0.054539551450487236',
            'issueDate' :'贷款发放的月份''IV = 0.036964686109917724',
            'purpose' :'借款人在贷款申请时的贷款用途类别''IV = 0.019647201150325398',
            'postCode' :'借款人在贷款申请中提供的邮政编码的前3位数字''IV = 0.010702441560335733',
            'regionCode' :'地区编码''IV = 0.013018602884429593',
            'dti' :'债务收入比''IV = 0.08255423405665527',
            'delinquency_2years':'借款人过去2年信用档案中逾期30天以上的违约事件数''IV = 0.001959680079101798',
            'ficoRangeLow' :'借款人在贷款发放时的fico所属的下限范围',
            'ficoRangeHigh' :'借款人在贷款发放时的fico所属的上限范围',
            'openAcc' :'借款人信用档案中未结信用额度的数量''IV = 0.0052698866923224855',
            'pubRec' :'贬损公共记录的数量''IV = 0.008044071285116878',
            'pubRecBankruptcies' :'公开记录清除的数量''IV = 0.005856853188710783',
            'revolBal' :'信贷周转余额合计''IV = 0.009779131264291908',
            'revolUtil' :'循环额度利用率，或借款人使用的相对于所有可用循环信贷的信贷金额''IV = 0.02569699174105087',
            'totalAcc' :'借款人信用档案中当前的信用额度总数''IV = 0.004521713753557946',
            'initialListStatus' :'贷款的初始列表状态''IV = 0.001014710143494042',
            'applicationType' :'表明贷款是个人申请还是与两个共同借款人的联合申请''IV = 0.0024458595151213756',
            'earliesCreditLine' :'借款人最早报告的信用额度开立的月份''IV = 0.02203019623709871',
            'title' :'借款人提供的贷款名称''IV = 0.038632450411233074',
            'policyCode' :'公开可用的策略代码 = 1,新产品不公开可用的策略代码 = 2',
             }
# data.rename(columns=column,inplace=True)
rs = 18 # random seed for the stratified split; 3, 7 and 18 performed well empirically

data.drop_duplicates(inplace=True)    # drop duplicate rows in place


def Missing_value_handling(train):
    """Impute missing values and normalise `employmentLength`, in place.

    Each listed column is filled with its most frequent value.  `Series.mode()`
    returns its results sorted ascending, so taking element 0 breaks ties by
    the smallest value — the same tie-breaking rule as
    ``SimpleImputer(strategy="most_frequent")`` used by the original code.
    `employmentLength` strings ('10+ years', '< 1 year', 'N years') are then
    converted to int8 year counts.

    Parameters
    ----------
    train : pd.DataFrame
        Dataset to clean; it is modified in place and also returned.

    Returns
    -------
    pd.DataFrame
        The same `train` object, for call chaining.
    """
    # Same column set the original imputed one-by-one with SimpleImputer.
    # (The original also extracted 'annualIncome' and 'employmentTitle' but
    # never imputed them — those dead extractions are dropped here.)
    mode_cols = ["employmentLength", "postCode", "dti", "pubRecBankruptcies",
                 "revolUtil", "title"] + [f"n{i}" for i in range(15)]
    for col in mode_cols:
        # mode(dropna=True)[0] == most frequent non-missing value, smallest on ties
        train.loc[:, col] = train[col].fillna(train[col].mode(dropna=True)[0])

    # '10+ years' -> 10, '< 1 year' -> 0, 'N years' -> N (exact-match replace,
    # same semantics as the original chained inplace .replace calls).
    train['employmentLength'] = (
        train['employmentLength']
        .replace({'10+ years': '10 years', '< 1 year': '0 years'})
        .apply(lambda s: np.int8(s.split()[0]))
    )
    # print(train['employmentLength'].value_counts(dropna=False).sort_index())
    return train

def grace_Processing(train):
    """Ordinal-encode 'grade' and 'subGrade' in place and return `train`.

    Each column's unique values are sorted and mapped to float codes
    0.0 .. k-1.0, which reproduces sklearn's ``OrdinalEncoder`` (its
    ``categories_`` are the sorted unique values per column, encoded
    independently).  The original additionally fitted a second, throw-away
    OrdinalEncoder just to compute ``.categories_`` — that redundant full
    pass over the data is removed here.
    """
    for col in ("grade", "subGrade"):
        # sorted unique values -> float code, matching OrdinalEncoder's order
        mapping = {cat: float(code) for code, cat in enumerate(sorted(train[col].unique()))}
        train.loc[:, col] = train[col].map(mapping)
    return train


# Run the preprocessing steps in place on the full dataset.
Missing_value_handling(data)
grace_Processing(data)
#data['earliesCreditLine'] = data.loc[:, 'earliesCreditLine'].apply(lambda s: int(s[-4:]))
#data['issueDate'] = data.loc[:, "issueDate"].apply(lambda s: int(s[:4]))

# ---- Feature derivation ----
# (IV values in the comments were measured on the training set.)
# annual income / installment   IV = 0.11238222472325085
data['Income_installment'] = round(data.loc[:, 'annualIncome'] / data.loc[:, 'installment'], 2)
# loan amount / installment -> roughly the number of payments; interacts with interestRate+subGrade   IV = 0.44282221539388084
data['loanAmnt_installment'] = round(data.loc[:, 'loanAmnt'] / data.loc[:, 'installment'], 2)
# annual income * dti == total debt   IV = 0.01136849120405251
data['debt'] = round(data.loc[:, 'annualIncome'] * data.loc[:, 'dti'], 2)
#   IV = 0.07670943916518724
# NOTE(review): despite the name, this is annualIncome / debt, not loanAmnt / debt — confirm intent.
data['loanAmnt_debt'] = round(data.loc[:, 'annualIncome'] / data.loc[:, 'debt'], 2)
# Replace NaN (0/0) and +/-inf (debt == 0) with the column minimum.
# Fixed: use .loc instead of chained indexing (data[col][mask] = ...), which is
# unreliable (SettingWithCopyWarning) and silently stops mutating the frame
# under pandas copy-on-write.  Statement order preserved: each min() is
# evaluated at the same point as before.
data.loc[np.isnan(data['loanAmnt_debt']), 'loanAmnt_debt'] = data['loanAmnt_debt'].min()
data.loc[np.isinf(data['loanAmnt_debt']), 'loanAmnt_debt'] = data['loanAmnt_debt'].min()
# average yearly income over employment length  IV = 0.0260792999836242
data['avg_income'] = data['annualIncome'] / data['employmentLength']
data.loc[np.isnan(data['avg_income']), 'avg_income'] = data['avg_income'].min()
data.loc[np.isinf(data['avg_income']), 'avg_income'] = data['avg_income'].min()
# total income over employment length   IV = 0.017252938374122555
data['total_income'] = data['annualIncome'] * data['employmentLength']
# average loan per term  IV = 0.06304289918354966
data['avg_loanAmnt'] = data['loanAmnt'] / data['term']
# average interest rate per term  IV = 0.2608698278216787
data['mean_interestRate'] = data['interestRate'] / data['term']
# total payment (installment * term)  IV = 0.10506379825440872
data['all_installment'] = data['installment'] * data['term']
# average loan / annual income  IV = 0.056868079765565856
#data['rest_money_rate'] = data['avg_loanAmnt'] / (data['annualIncome'] + 0.1)  # 287 rows have zero income
# idle money  IV = 0.04235552184464766
data['rest_money'] = data['annualIncome'] - data['avg_loanAmnt']
# closed credit lines in the credit file  IV = 0.014464903589669393
data['closeAcc'] = data['totalAcc'] - data['openAcc']
# mean FICO score (midpoint of the reported range)  IV = 0.11944945798385581
data['ficoRange_mean'] = (data['ficoRangeHigh'] + data['ficoRangeLow']) / 2

# derogatory public records not yet cleared  IV = 0.0028090235145379083
data['rest_pubRec'] = data['pubRec'] - data['pubRecBankruptcies']
# loan amount minus total revolving balance     IV = 0.055741265898980416
data['rest_Revol'] = data['loanAmnt'] - data['revolBal']
# keep only the year component of the issue date / earliest credit line
data['issueDate'] = data.loc[:, "issueDate"].apply(lambda s: int(s[:4]))
data['earliesCreditLine'] = data.loc[:, 'earliesCreditLine'].apply(lambda s: int(s[-4:]))
#   IV = 0.01274672712502264
#data['CreditLine'] = data.loc[:, 'earliesCreditLine'] - data.loc[:, 'issueDate']
#   0.24811756219737888
data['debt1'] = round(data.loc[:, 'loanAmnt'] * data.loc[:, 'interestRate'] * data.loc[:, 'term'], 2)
#data['zuhu'] = np.log(data.loc[:, 'installment'])-np.log(data.loc[:, 'delinquency_2years'])
# Dataset summary after feature derivation.
print(data.columns)
print(data.info())
print(data.describe())


# Correlation heatmap (dead code, disabled via a triple-quoted string; kept for reference).
'''
# 热力图绘制
import matplotlib.pyplot as plt
import seaborn as sn

['isDefault', 'pubRec', 'annualIncome', 'loanAmnt', 'dti', 'totalAcc',
       'interestRate', 'id', 'revolUtil', 'revolBal', 'pubRecBankruptcies',
       'installment', 'delinquency_2years', 'openAcc', 'fico', 'grade',
       'subGrade', 'earliesCreditLine', 'issueDate', 'employmentLength',
       'term', 'employmentTitle', 'homeOwnership', 'verificationStatus',
       'purpose', 'postCode', 'regionCode', 'initialListStatus',
       'applicationType', 'title', 'n0', 'n1', 'n2', 'n3', 'n4', 'n5', 'n6',
       'n7', 'n8', 'n9', 'n10', 'n11', 'n12', 'n13', 'n14',
       'Income_installment', 'loanAmnt_installment', 'debt', 'loanAmnt_debt',
       'avg_income', 'total_income', 'avg_loanAmnt', 'mean_interestRate',
       'all_installment', 'rest_money_rate', 'rest_money', 'closeAcc',
       'rest_pubRec', 'rest_Revol', 'CreditLine', 'debt1']
bin1=['isDefault', 'pubRec', 'annualIncome', 'loanAmnt', 'dti', 'totalAcc',
       'interestRate', 'id', 'revolUtil', 'revolBal', 'pubRecBankruptcies',]
bin2=['isDefault','installment', 'delinquency_2years', 'openAcc', 'fico', 'grade',
       'subGrade', 'earliesCreditLine', 'issueDate', 'employmentLength',
       'term',]
bin3=['isDefault','employmentTitle', 'homeOwnership', 'verificationStatus',
       'purpose', 'postCode', 'regionCode', 'initialListStatus',
       'applicationType', 'title', 'n0',]
bin4=['isDefault','n1', 'n2', 'n3', 'n4', 'n5', 'n6',
       'n7', 'n8', 'n9', 'n10',]
bin5=['isDefault','n11', 'n12', 'n13', 'n14','Income_installment', 'loanAmnt_installment', 'debt', 'loanAmnt_debt',
       'avg_income', 'total_income', ]
bin6=['isDefault','avg_loanAmnt', 'mean_interestRate',
       'all_installment', 'rest_money_rate', 'rest_money', 'closeAcc',
       'rest_pubRec', 'rest_Revol', 'CreditLine', 'debt1']
corrMatt = data[bin6].corr()

# print(corrMatt)
mask = np.array(corrMatt)
mask[np.tril_indices_from(mask)] = False
fig,ax= plt.subplots()
fig.set_size_inches(20,10)
sn.heatmap(corrMatt, mask=mask,vmax=0.8, square=True,annot=True)
plt.show()
'''

# Embedding layer
# Decision-tree based binning
def optimal_binning_boundary(x: pd.Series, y: pd.Series, nan: float = -999.) -> list:
    """Return optimal bin boundaries for feature `x` against binary target `y`.

    A shallow decision tree (entropy criterion, at most 20 leaves, each leaf
    holding at least 0.5% of the samples) is fitted on `x` alone; its internal
    split thresholds become the bin edges.

    Parameters
    ----------
    x : pd.Series
        Feature values.  Missing entries are filled with `nan` before fitting.
    y : pd.Series
        Binary target (0/1).
    nan : float, default -999.
        Fill value for missing entries.  (Fixed: the parameter was declared but
        never used; the original comment said "fill missing values" but the
        fill never happened, so NaN input crashed the tree fit.)

    Returns
    -------
    list
        Sorted boundaries, bracketed by x.min() and x.max() + 0.1.
    """
    boundary = []  # bin boundary values to return

    x = x.fillna(nan).values  # fill missing values — DecisionTreeClassifier rejects NaN
    y = y.values

    clf = DecisionTreeClassifier(criterion='entropy',  # split by information-entropy minimisation
                                 # max_depth =200,
                                 max_leaf_nodes=20,  # cap on the number of leaves
                                 min_samples_leaf=0.005)  # min fraction of samples per leaf

    # reshape(-1, 1): single-feature column matrix, as fit() requires
    clf.fit(x.reshape(-1, 1), y)

    n_nodes = clf.tree_.node_count
    children_left = clf.tree_.children_left
    children_right = clf.tree_.children_right
    threshold = clf.tree_.threshold

    # Internal nodes (left child != right child) carry the split thresholds.
    for i in range(n_nodes):
        if children_left[i] != children_right[i]:
            boundary.append(threshold[i])

    boundary.sort()

    min_x = x.min()
    max_x = x.max() + 0.1  # +0.1 so a later groupby/cut still includes the max-valued rows

    return [min_x] + boundary + [max_x]
# WOE / IV (information value) computation
def feature_woe_iv(x: pd.Series, y: pd.Series, column, data) -> pd.DataFrame:
    """Compute per-bin WOE/IV for `column`, replace that column in `data` with
    integer bin labels (in place), and return `data`.

    Parameters: x/y are the feature values and the binary target (0 = good,
    1 = bad); `column` is the feature's name inside `data`; `data` is the full
    dataset.  The per-bin IV sum is printed.

    NOTE(review): each per-bin IV term (good_pct - bad_pct) * woe is >= 0
    (or +inf / NaN when a bin has zero good or bad counts), so the sum can
    never be negative and the `<= -0.05` drop branch appears unreachable;
    the intended threshold was probably a small positive value — confirm.
    """
    # x = x.fillna(nan)
    boundary = optimal_binning_boundary(x, y)  # optimal bin edges from the decision tree
    df = pd.concat([x, y], axis=1)  # join x and y (before mutating `data`) for per-bin stats
    df.columns = ['x', 'y']  # canonical feature / target column names
    #print('=========')
    #print(boundary)
    print('==========')
    # Replace the raw column with integer bin labels 0..k-1.
    # NOTE(review): this cut uses right-closed intervals (plus include_lowest),
    # while df['bins'] below uses right=False (left-closed) — values equal to a
    # boundary can land in different bins in the two cuts; confirm intent.
    data.loc[:, column] = pd.cut(data.loc[:, column], bins=boundary, labels=[i for i in range(len(boundary) - 1)],include_lowest=True)  # bin label for every value of the column
    df['bins'] = pd.cut(x=x, bins=boundary, right=False)  # bin interval for every x value

    grouped = df.groupby('bins')['y']  # per-bin counts of good / bad / total customers
    result_df = grouped.agg([('good', lambda y: (y == 0).sum()),
                             ('bad', lambda y: (y == 1).sum()),
                             ('total', 'count')])

    result_df['good_pct'] = result_df['good'] / result_df['good'].sum()  # share of all good customers
    result_df['bad_pct'] = result_df['bad'] / result_df['bad'].sum()  # share of all bad customers
    result_df['total_pct'] = result_df['total'] / result_df['total'].sum()  # share of all customers

    result_df['bad_rate'] = result_df['bad'] / result_df['total']  # bad rate within the bin

    result_df['woe'] = np.log(result_df['good_pct'] / result_df['bad_pct'])  # WOE
    result_df['iv'] = (result_df['good_pct'] - result_df['bad_pct']) * result_df['woe']  # IV
    if result_df['iv'].sum() <= -0.05:
        data = data.drop([column], axis=1)
    print(column + f"该变量IV = {result_df['iv'].sum()}")
    return data
# One-hot encoding helper (dead code, disabled via a triple-quoted string).
# NOTE(review): it references columns from a different dataset
# (SeriousDlqin2yrs, RevolvingUtilizationOfUnsecuredLines, age, DebtRatio) —
# apparently copied from a "Give Me Some Credit"-style project; it would not
# run against this loan dataset as-is.
'''
def one_hot_encoder(data):
    data_feature = data.iloc[:, 6:] # RevolvingUtilizationOfUnsecuredLines可用额度比例、DebtRatio负债率为离散型
    enc = preprocessing.OneHotEncoder()
    df = enc.fit_transform(data_feature)  # fit来学习编码
    print(enc.categories_)
    df1 = pd.DataFrame(data=df.toarray())
    df1.insert(loc=0, column='SeriousDlqin2yrs', value=data['SeriousDlqin2yrs'])  # value不能用data_feature 因为是data的复本
    df1.insert(loc=1, column='RevolvingUtilizationOfUnsecuredLines', value=data['RevolvingUtilizationOfUnsecuredLines'])
    df1.insert(loc=2, column='age', value=data['age'])
    df1.insert(loc=3, column='DebtRatio', value=data['DebtRatio'])
    data = df1

    return data
'''

# data.iloc[:, 0] selects the first column
# data.iloc[0] selects the first row

# Bin every feature column (everything after the target) and compute its IV.
# NOTE(review): the loop variable `column` shadows the module-level `column`
# description dict defined near the top of the file — rename one of them if
# the dict is ever used after this point.
for column in data.iloc[:,1:].columns: # every column from the 2nd to the last # IV computation
    data = feature_woe_iv(x=data[column], y=data['isDefault'],column =column ,data=data) # decision-tree binning + IV per column
data = data.reset_index(drop=True)

# from sklearn import preprocessing # one-hot encoding (disabled)
# feature = [3,4,6,7,8,9,10,11,12]
# data = one_hot_encoder(data)

# Train/test split, stratified on the target

from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=10, test_size=0.2, random_state=rs) # tried test_size 0.2 and 0.3
# Stratified sampling on the target column (data.iloc[:, 0] == 'isDefault');
# the "mnist" wording in the original comment was a copy-paste leftover.
# NOTE(review): with n_splits=10 the loop overwrites user_train/user_test every
# iteration, so only the 10th split is kept.  n_splits=1 would be cheaper but
# would also change which rows land in the split — confirm before changing.
for train_index, test_index in split.split(data.iloc[:, 1:], data.iloc[:, 0]): # split(X, y)
    user_train = data.iloc[train_index]
    user_train_target = user_train['isDefault']
    user_test = data.iloc[test_index]
    user_test_target = user_test['isDefault']
# index=1 is truthy, i.e. equivalent to index=True: the index is written out.
user_train.to_csv('user_trainL.csv', index=1)
user_test.to_csv('user_testL.csv', index=1)
